VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@105462

Last change on this file since 105462 was 103802, checked in by vboxsync, 9 months ago

VMM/PDMAllCritSect.cpp: Build fix for dbgopt build

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.1 KB
1/* $Id: PDMAllCritSect.cpp 103802 2024-03-12 09:10:55Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
33#include "PDMInternal.h"
34#include <VBox/vmm/pdmcritsect.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/err.h>
39#include <VBox/vmm/hm.h>
40
41#include <VBox/log.h>
42#include <iprt/asm.h>
43#include <iprt/assert.h>
44#ifdef IN_RING3
45# include <iprt/lockvalidator.h>
46#endif
47#if defined(IN_RING3) || defined(IN_RING0)
48# include <iprt/semaphore.h>
49#endif
50#ifdef IN_RING0
51# include <iprt/time.h>
52#endif
53#if defined(IN_RING3) || defined(IN_RING0)
54# include <iprt/thread.h>
55#endif
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61/** The number of loops to spin for in ring-3. */
62#define PDMCRITSECT_SPIN_COUNT_R3 20
63/** The number of loops to spin for in ring-0. */
64#define PDMCRITSECT_SPIN_COUNT_R0 256
65/** The number of loops to spin for in the raw-mode context. */
66#define PDMCRITSECT_SPIN_COUNT_RC 256
67
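/*
 * Illustrative note (not part of the upstream file): the enter path below picks
 * one of these values with CTX_SUFF(PDMCRITSECT_SPIN_COUNT_), which appends the
 * current context suffix.  A minimal sketch of the equivalent selection,
 * assuming the usual IN_RING3/IN_RING0/IN_RC context defines (the name
 * PDMCRITSECT_SPIN_COUNT is hypothetical and used only in this sketch):
 *
 *     #if defined(IN_RING3)
 *     # define PDMCRITSECT_SPIN_COUNT     PDMCRITSECT_SPIN_COUNT_R3
 *     #elif defined(IN_RING0)
 *     # define PDMCRITSECT_SPIN_COUNT     PDMCRITSECT_SPIN_COUNT_R0
 *     #else
 *     # define PDMCRITSECT_SPIN_COUNT     PDMCRITSECT_SPIN_COUNT_RC
 *     #endif
 */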
68
69/** Skips some of the overly paranoid atomic updates.
70 * Makes some assumptions about cache coherence, though it is not brave enough
71 * to skip the concluding atomic update. */
72#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
73
74/* Undefine the automatic VBOX_STRICT API mappings. */
75#undef PDMCritSectEnter
76#undef PDMCritSectTryEnter
77
78
79/**
80 * Gets the ring-3 native thread handle of the calling thread.
81 *
82 * @returns native thread handle (ring-3).
83 * @param pVM The cross context VM structure.
84 * @param pCritSect The critical section. This is used in R0 and RC.
85 */
86DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
87{
88#ifdef IN_RING3
89 RT_NOREF(pVM, pCritSect);
90 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
91
92#elif defined(IN_RING0)
93 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
94 NIL_RTNATIVETHREAD);
95 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
96 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
97
98#else
99# error "Invalid context"
100#endif
101 return hNativeSelf;
102}
103
104
105#ifdef IN_RING0
106/**
107 * Marks the critical section as corrupted.
108 */
109DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
110{
111 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
112 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
113 return VERR_PDM_CRITSECT_IPE;
114}
115#endif
116
117
118/**
119 * Tail code called when we've won the battle for the lock.
120 *
121 * @returns VINF_SUCCESS.
122 *
123 * @param pCritSect The critical section.
124 * @param hNativeSelf The native handle of this thread.
125 * @param pSrcPos The source position of the lock operation.
126 */
127DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
128{
129 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
130 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
131 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
132
133# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
134 pCritSect->s.Core.cNestings = 1;
135# else
136 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
137# endif
138 Assert(pCritSect->s.Core.cNestings == 1);
139 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
140
141# ifdef PDMCRITSECT_STRICT
142 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
143# else
144 NOREF(pSrcPos);
145# endif
146# ifdef IN_RING3
147 if (pSrcPos)
148 Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
149 else
150# endif
151 Log12Func(("%p\n", pCritSect));
152
153 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
154 return VINF_SUCCESS;
155}
156
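/*
 * Illustrative sketch (not from the upstream file): a minimal, self-contained
 * model of the cLockers protocol that pdmCritSectEnterFirst and its callers
 * rely on.  The Demo* names are hypothetical; the real code uses the
 * ASMAtomic* primitives and blocks on a SUPSEMEVENT instead of spinning.
 * Convention: cLockers == -1 means free, 0 means owned with nobody waiting,
 * and > 0 means owned with that many lockers queued behind the owner.
 *
 *     #include <stdatomic.h>
 *     #include <stdbool.h>
 *
 *     typedef struct DEMOCRITSECT { atomic_int cLockers; } DEMOCRITSECT; // initialise cLockers to -1
 *
 *     static bool demoTryEnter(DEMOCRITSECT *p)       // uncontended fast path
 *     {
 *         int iFree = -1;
 *         return atomic_compare_exchange_strong(&p->cLockers, &iFree, 0);
 *     }
 *
 *     static bool demoEnterContended(DEMOCRITSECT *p) // true if we won anyway
 *     {
 *         return atomic_fetch_add(&p->cLockers, 1) + 1 == 0; // was it -1?
 *     }
 *
 *     static bool demoLeave(DEMOCRITSECT *p)          // true if a waiter must be
 *     {                                               // woken via the semaphore
 *         return atomic_fetch_sub(&p->cLockers, 1) - 1 >= 0;
 *     }
 */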
157
158#if defined(IN_RING3) || defined(IN_RING0)
159/**
160 * Deals with the contended case in ring-3 and ring-0.
161 *
162 * @retval VINF_SUCCESS on success.
163 * @retval VERR_SEM_DESTROYED if destroyed.
164 *
165 * @param pVM The cross context VM structure.
166 * @param pVCpu The cross context virtual CPU structure if ring-0 and on
167 * an EMT, otherwise NULL.
168 * @param pCritSect The critsect.
169 * @param hNativeSelf The native thread handle.
170 * @param pSrcPos The source position of the lock operation.
171 * @param rcBusy The status code to return when we're in RC or R0
172 */
173static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf,
174 PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
175{
176# ifdef IN_RING0
177 /*
178 * If we've got queued critical section leave operations and rcBusy isn't
179 * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
180 */
181 if ( !pVCpu
182 || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
183 || rcBusy == VINF_SUCCESS )
184 { /* likely */ }
185 else
186 {
187 /** @todo statistics. */
188 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
189 return rcBusy;
190 }
191# endif
192
193 /*
194 * Start waiting.
195 */
196 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
197 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
198# ifdef IN_RING3
199 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
200# else
201 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
202# endif
203
204 /*
205 * The wait loop.
206 *
207 * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
208 */
209 STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
210 PSUPDRVSESSION const pSession = pVM->pSession;
211 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
212# ifdef IN_RING3
213# ifdef PDMCRITSECT_STRICT
214 RTTHREAD const hThreadSelf = RTThreadSelfAutoAdopt();
215 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
216 if (RT_FAILURE(rc2))
217 return rc2;
218# else
219 RTTHREAD const hThreadSelf = RTThreadSelf();
220# endif
221# else /* IN_RING0 */
222 uint64_t const tsStart = RTTimeNanoTS();
223 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
224 uint64_t cNsMaxTotal = cNsMaxTotalDef;
225 uint64_t const cNsMaxRetry = RT_NS_15SEC;
226 uint32_t cMsMaxOne = RT_MS_5SEC;
227 bool fNonInterruptible = false;
228# endif
229 for (;;)
230 {
231 /*
232 * Do the wait.
233 *
234 * In ring-3 this gets cluttered by lock validation and thread state
235 * maintenance.
236 *
237 * In ring-0 we have to deal with the possibility that the thread has
238 * been signalled and the interruptible wait function returns
239 * immediately. In that case we do the normal R0/RC rcBusy handling.
240 *
241 * We always do a timed wait here, so the event handle is revalidated
242 * regularly and we won't end up stuck waiting for a destroyed critsect.
243 */
244 /** @todo Make SUPSemEventClose wake up all waiters. */
245# ifdef IN_RING3
246# ifdef PDMCRITSECT_STRICT
247 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
248 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
249 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
250 if (RT_FAILURE(rc9))
251 return rc9;
252# else
253 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
254# endif
255 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
256 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
257# else /* IN_RING0 */
258 int const rc = !fNonInterruptible
259 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
260 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
261 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
262 pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
263# endif /* IN_RING0 */
264
265 /*
266 * Make sure the critical section hasn't been deleted before continuing.
267 */
268 if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC))
269 { /* likely */ }
270 else
271 {
272 LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc));
273 return VERR_SEM_DESTROYED;
274 }
275
276 /*
277 * Most likely we're here because we got signalled.
278 */
279 if (rc == VINF_SUCCESS)
280 {
281 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
282 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
283 }
284
285 /*
286 * Timeout and interrupted waits need careful handling in ring-0
287 * because we're cooperating with ring-3 on this critical section
288 * and thus need to make absolutely sure we won't get stuck here.
289 *
290 * The r0 interrupted case means something is pending (termination,
291 * signal, APC, debugger, whatever), so we must try our best to
292 * return to the caller and to ring-3 so it can be dealt with.
293 */
294 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
295 {
296# ifdef IN_RING0
297 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
298 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
299 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
300 ("rcTerm=%Rrc\n", rcTerm));
301 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
302 cNsMaxTotal = RT_NS_1MIN;
303
304 if (rc == VERR_TIMEOUT)
305 {
306 /* Try to get out of here with a non-VINF_SUCCESS status if
307 the thread is terminating or if the timeout has been exceeded. */
308 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrTimeout);
309 if ( rcTerm != VINF_THREAD_IS_TERMINATING
310 && cNsElapsed <= cNsMaxTotal)
311 continue;
312 }
313 else
314 {
315 /* For the interrupted case we must return if we can. If rcBusy is VINF_SUCCESS,
316 we will try a non-interruptible sleep for a while to help resolve the issue
317 without triggering a guru meditation. */
318 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrInterrupted);
319 if ( rcTerm != VINF_THREAD_IS_TERMINATING
320 && rcBusy == VINF_SUCCESS
321 && pVCpu != NULL
322 && cNsElapsed <= cNsMaxTotal)
323 {
324 if (!fNonInterruptible)
325 {
326 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectNonInterruptibleWaits);
327 fNonInterruptible = true;
328 cMsMaxOne = 32;
329 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
330 if (cNsLeft > RT_NS_10SEC)
331 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
332 }
333 continue;
334 }
335 }
336
337 /*
338 * Let's try to get out of here. We must very carefully undo the
339 * cLockers increment we did using compare-and-exchange so that
340 * we don't race the semaphore signalling in PDMCritSectLeave
341 * and end up with spurious wakeups and two owners at once.
342 */
343 uint32_t cNoIntWaits = 0;
344 uint32_t cCmpXchgs = 0;
345 int32_t cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
346 for (;;)
347 {
348 if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)
349 {
350 if (cLockers > 0 && cCmpXchgs < _64M)
351 {
352 bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers);
353 if (fRc)
354 {
355 LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect,
356 rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc));
357 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters);
358 return rcBusy != VINF_SUCCESS ? rcBusy : rc;
359 }
360 cCmpXchgs++;
361 if ((cCmpXchgs & 0xffff) == 0)
362 Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
363 pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
364 ASMNopPause();
365 continue;
366 }
367
368 if (cLockers == 0)
369 {
370 /*
371 * We are racing someone in PDMCritSectLeave.
372 *
373 * For the VERR_TIMEOUT case we'll just retry taking it the normal
374 * way for a while. For VERR_INTERRUPTED we're in for more fun as
375 * the previous owner might not have signalled the semaphore yet,
376 * so we'll do a short non-interruptible wait instead and then guru.
377 */
378 if ( rc == VERR_TIMEOUT
379 && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry)
380 break;
381
382 if ( rc == VERR_INTERRUPTED
383 && ( cNoIntWaits == 0
384 || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS))
385 {
386 int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/);
387 if (rc2 == VINF_SUCCESS)
388 {
389 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
390 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
391 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
392 }
393 cNoIntWaits++;
394 cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
395 continue;
396 }
397 }
398 else
399 LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect));
400
401 /* Sabotage the critical section and return error to caller. */
402 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT);
403 LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n",
404 pCritSect, rc, rcTerm));
405 return VERR_PDM_CRITSECT_ABORT_FAILED;
406 }
407 LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n",
408 pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm));
409 return VERR_SEM_DESTROYED;
410 }
411
412 /* We get here if we timed out. Just retry now that it
413 appears someone left already. */
414 Assert(rc == VERR_TIMEOUT);
415 cMsMaxOne = 10 /*ms*/;
416
417# else /* IN_RING3 */
418 RT_NOREF(pVM, pVCpu, rcBusy);
419# endif /* IN_RING3 */
420 }
421 /*
422 * Any other return code is fatal.
423 */
424 else
425 {
426 AssertMsgFailed(("rc=%Rrc\n", rc));
427 return RT_FAILURE_NP(rc) ? rc : -rc;
428 }
429 }
430 /* won't get here */
431}
432#endif /* IN_RING3 || IN_RING0 */
433
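/*
 * Caller-side sketch of the rcBusy contract handled above (illustrative only;
 * demoMmioWrite and the choice of VINF_IOM_R3_MMIO_WRITE as the busy status
 * are assumptions, not taken from this file).  A ring-0 handler passes a
 * "redo this in ring-3" status as rcBusy so the contended path can bail out
 * instead of risking a deadlock while leave operations are queued.
 *
 *     static int demoMmioWrite(PVMCC pVM, PPDMCRITSECT pCritSect)
 *     {
 *         int rc = PDMCritSectEnter(pVM, pCritSect, VINF_IOM_R3_MMIO_WRITE);
 *         if (rc == VINF_IOM_R3_MMIO_WRITE)
 *             return rc;                  // busy: restart the access in ring-3
 *         AssertRCReturn(rc, rc);         // VERR_SEM_DESTROYED and friends
 *
 *         // ... access the shared state ...
 *
 *         PDMCritSectLeave(pVM, pCritSect);
 *         return VINF_SUCCESS;
 *     }
 */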
434
435/**
436 * Common worker for the debug and normal APIs.
437 *
438 * @returns VINF_SUCCESS if entered successfully.
439 * @returns rcBusy when encountering a busy critical section in RC/R0.
440 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
441 * during the operation.
442 *
443 * @param pVM The cross context VM structure.
444 * @param pCritSect The PDM critical section to enter.
445 * @param rcBusy The status code to return when we're in RC or R0
446 * @param pSrcPos The source position of the lock operation.
447 */
448DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
449{
450 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
451 Assert(pCritSect->s.Core.cNestings >= 0);
452#if defined(VBOX_STRICT) && defined(IN_RING0)
453 /* Hope we're not messing with critical sections while in the no-block
454 zone, as that would complicate things a lot. */
455 PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
456 Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
457#endif
458
459 /*
460 * If the critical section has already been destroyed, then inform the caller.
461 */
462 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
463 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
464 VERR_SEM_DESTROYED);
465
466 /*
467 * See if we're lucky.
468 */
469 /* NOP ... */
470 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
471 { /* We're more likely to end up here with real critsects than a NOP one. */ }
472 else
473 return VINF_SUCCESS;
474
475 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
476 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
477 /* ... not owned ... */
478 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
479 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
480
481 /* ... or nested. */
482 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
483 {
484 Assert(pCritSect->s.Core.cNestings >= 1);
485# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
486 pCritSect->s.Core.cNestings += 1;
487# else
488 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
489# endif
490 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
491 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
492 return VINF_SUCCESS;
493 }
494
495 /*
496 * Spin for a bit without incrementing the counter.
497 */
498 /** @todo Move this to cfgm variables since it doesn't make sense to spin on
499 * uniprocessor systems. */
500 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
501 while (cSpinsLeft-- > 0)
502 {
503 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
504 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
505 ASMNopPause();
506 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
507 cli'ed pendingpreemption check up front using sti w/ instruction fusing
508 for avoiding races. Hmm ... This is assuming the other party is actually
509 executing code on another CPU ... which we could keep track of if we
510 wanted. */
511 }
512
513#ifdef IN_RING3
514 /*
515 * Take the slow path.
516 */
517 NOREF(rcBusy);
518 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
519
520#elif defined(IN_RING0)
521# if 1 /* new code */
522 /*
523 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
524 * account when waiting on contended locks.
525 *
526 * While we usually (unless rcBusy is VINF_SUCCESS) have the option of returning
527 * rcBusy and forcing the caller to go back to ring-3 to restart the work
528 * there, it's almost always more efficient to try to wait for the lock here.
529 * The rcBusy status will be used if we encounter a VERR_INTERRUPTED situation
530 * though.
531 */
532 PVMCPUCC pVCpu = VMMGetCpu(pVM);
533 if (pVCpu)
534 {
535 VMMR0EMTBLOCKCTX Ctx;
536 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
537 if (rc == VINF_SUCCESS)
538 {
539 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
540
541 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
542
543 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
544 }
545 else
546 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
547 return rc;
548 }
549
550 /* Non-EMT. */
551 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
552 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
553
554# else /* old code: */
555 /*
556 * If preemption hasn't been disabled, we can block here in ring-0.
557 */
558 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
559 && ASMIntAreEnabled())
560 return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy);
561
562 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
563
564 /*
565 * Call ring-3 to acquire the critical section?
566 */
567 if (rcBusy == VINF_SUCCESS)
568 {
569 PVMCPUCC pVCpu = VMMGetCpu(pVM);
570 AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
571 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
572 }
573
574 /*
575 * Return busy.
576 */
577 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
578 return rcBusy;
579# endif /* old code */
580#else
581# error "Unsupported context"
582#endif
583}
584
585
586/**
587 * Enters a PDM critical section.
588 *
589 * @returns VINF_SUCCESS if entered successfully.
590 * @returns rcBusy when encountering a busy critical section in RC/R0.
591 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
592 * during the operation.
593 *
594 * @param pVM The cross context VM structure.
595 * @param pCritSect The PDM critical section to enter.
596 * @param rcBusy The status code to return when we're in RC or R0
597 * and the section is busy. Pass VINF_SUCCESS to
598 * acquire the critical section through a ring-3
599 * call if necessary.
600 *
601 * @note Even callers setting @a rcBusy to VINF_SUCCESS must either handle
602 * possible failures in ring-0 or apply
603 * PDM_CRITSECT_RELEASE_ASSERT_RC(),
604 * PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(),
605 * PDM_CRITSECT_RELEASE_ASSERT_RC_DRV() or
606 * PDM_CRITSECT_RELEASE_ASSERT_RC_USB() to the return value of this
607 * function.
608 */
609VMMDECL(DECL_CHECK_RETURN_NOT_R3(int)) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
610{
611#ifndef PDMCRITSECT_STRICT
612 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
613#else
614 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
615 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
616#endif
617}
618
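/*
 * Usage sketch for the note above (illustrative; pThis->CritSect is a
 * hypothetical device state member).  Passing VINF_SUCCESS as rcBusy means the
 * call may still fail in ring-0, so the status must be checked or
 * release-asserted as described.
 *
 *     int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VINF_SUCCESS);
 *     AssertReleaseRC(rc); // or one of the PDM_CRITSECT_RELEASE_ASSERT_RC* macros
 *     // ... work on the shared state ...
 *     PDMCritSectLeave(pVM, &pThis->CritSect);
 */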
619
620/**
621 * Enters a PDM critical section, with location information for debugging.
622 *
623 * @returns VINF_SUCCESS if entered successfully.
624 * @returns rcBusy when encountering a busy critical section in RC/R0.
625 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
626 * during the operation.
627 *
628 * @param pVM The cross context VM structure.
629 * @param pCritSect The PDM critical section to enter.
630 * @param rcBusy The status code to return when we're in RC or R0
631 * and the section is busy. Pass VINF_SUCCESS to
632 * acquire the critical section through a ring-3
633 * call if necessary.
634 * @param uId Some kind of locking location ID. Typically a
635 * return address up the stack. Optional (0).
636 * @param SRC_POS The source position where the lock is being
637 * acquired from. Optional.
638 */
639VMMDECL(DECL_CHECK_RETURN_NOT_R3(int))
640PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
641{
642#ifdef PDMCRITSECT_STRICT
643 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
644 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
645#else
646 NOREF(uId); RT_SRC_POS_NOREF();
647 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
648#endif
649}
650
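/*
 * Illustrative call (pThis->CritSect is a hypothetical device member): the
 * debug variant records the call site for the lock validator when
 * PDMCRITSECT_STRICT is in effect, and behaves like PDMCritSectEnter otherwise.
 *
 *     // uId = 0 (optional locking location ID); RT_SRC_POS supplies file/line/function.
 *     int rc = PDMCritSectEnterDebug(pVM, &pThis->CritSect, VERR_SEM_BUSY, 0, RT_SRC_POS);
 */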
651
652/**
653 * Common worker for the debug and normal APIs.
654 *
655 * @retval VINF_SUCCESS on success.
656 * @retval VERR_SEM_BUSY if the critsect was owned.
657 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
658 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
659 * during the operation.
660 *
661 * @param pVM The cross context VM structure.
662 * @param pCritSect The critical section.
663 * @param pSrcPos The source position of the lock operation.
664 */
665static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
666{
667 /*
668 * If the critical section has already been destroyed, then inform the caller.
669 */
670 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
671 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
672 VERR_SEM_DESTROYED);
673
674 /*
675 * See if we're lucky.
676 */
677 /* NOP ... */
678 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
679 { /* We're more likely to end up here with real critsects than a NOP one. */ }
680 else
681 return VINF_SUCCESS;
682
683 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
684 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
685 /* ... not owned ... */
686 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
687 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
688
689 /* ... or nested. */
690 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
691 {
692 Assert(pCritSect->s.Core.cNestings >= 1);
693# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
694 pCritSect->s.Core.cNestings += 1;
695# else
696 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
697# endif
698 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
699 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
700 return VINF_SUCCESS;
701 }
702
703 /* no spinning */
704
705 /*
706 * Return busy.
707 */
708#ifdef IN_RING3
709 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
710#else
711 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
712#endif
713 LogFlow(("PDMCritSectTryEnter: locked\n"));
714 return VERR_SEM_BUSY;
715}
716
717
718/**
719 * Try enter a critical section.
720 *
721 * @retval VINF_SUCCESS on success.
722 * @retval VERR_SEM_BUSY if the critsect was owned.
723 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
724 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
725 * during the operation.
726 *
727 * @param pVM The cross context VM structure.
728 * @param pCritSect The critical section.
729 */
730VMMDECL(DECL_CHECK_RETURN(int)) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
731{
732#ifndef PDMCRITSECT_STRICT
733 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
734#else
735 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
736 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
737#endif
738}
739
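/*
 * Typical try-enter pattern (sketch; the deferred action is hypothetical):
 * take the section if it is free, otherwise postpone the work rather than
 * blocking the current context.
 *
 *     int rc = PDMCritSectTryEnter(pVM, &pThis->CritSect);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... quick update of the shared state ...
 *         PDMCritSectLeave(pVM, &pThis->CritSect);
 *     }
 *     else
 *     {
 *         // VERR_SEM_BUSY (or VERR_SEM_DESTROYED): defer, e.g. set a flag and retry later.
 *     }
 */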
740
741/**
742 * Try enter a critical section, with location information for debugging.
743 *
744 * @retval VINF_SUCCESS on success.
745 * @retval VERR_SEM_BUSY if the critsect was owned.
746 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
747 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
748 * during the operation.
749 *
750 * @param pVM The cross context VM structure.
751 * @param pCritSect The critical section.
752 * @param uId Some kind of locking location ID. Typically a
753 * return address up the stack. Optional (0).
754 * @param SRC_POS The source position where the lock is being
755 * acquired from. Optional.
756 */
757VMMDECL(DECL_CHECK_RETURN(int))
758PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
759{
760#ifdef PDMCRITSECT_STRICT
761 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
762 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
763#else
764 NOREF(uId); RT_SRC_POS_NOREF();
765 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
766#endif
767}
768
769
770#ifdef IN_RING3
771/**
772 * Enters a PDM critical section.
773 *
774 * @returns VINF_SUCCESS if entered successfully.
775 * @returns rcBusy when encountering a busy critical section in GC/R0.
776 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
777 * during the operation.
778 *
779 * @param pVM The cross context VM structure.
780 * @param pCritSect The PDM critical section to enter.
781 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
782 */
783VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
784{
785 int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
786 if ( rc == VINF_SUCCESS
787 && fCallRing3
788 && pCritSect->s.Core.pValidatorRec
789 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
790 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
791 return rc;
792}
793#endif /* IN_RING3 */
794
795
796/**
797 * Leaves a critical section entered with PDMCritSectEnter().
798 *
799 * @returns Indication whether we really exited the critical section.
800 * @retval VINF_SUCCESS if we really exited.
801 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
802 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
803 *
804 * @param pVM The cross context VM structure.
805 * @param pCritSect The PDM critical section to leave.
806 *
807 * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
808 * where we'll queue the leave operation for ring-3 processing.
809 */
810VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
811{
812 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
813 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
814
815 /*
816 * Check for NOP sections before asserting ownership.
817 */
818 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
819 { /* We're more likely to end up here with real critsects than a NOP one. */ }
820 else
821 return VINF_SUCCESS;
822
823 /*
824 * Always check that the caller is the owner (screw performance).
825 */
826 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
827 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
828 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
829 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
830 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
831 VERR_NOT_OWNER);
832
833 /*
834 * Nested leave.
835 */
836 int32_t const cNestings = pCritSect->s.Core.cNestings;
837 Assert(cNestings >= 1);
838 if (cNestings > 1)
839 {
840#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
841 pCritSect->s.Core.cNestings = cNestings - 1;
842#else
843 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
844#endif
845 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
846 Assert(cLockers >= 0); RT_NOREF(cLockers);
847 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
848 return VINF_SEM_NESTED;
849 }
850
851 Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
852 pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));
853
854#ifdef IN_RING3
855 /*
856 * Ring-3: Leave for real.
857 */
858 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
859 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
860
861# if defined(PDMCRITSECT_STRICT)
862 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
863 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
864# endif
865 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
866
867# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
868 //pCritSect->s.Core.cNestings = 0; /* not really needed */
869 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
870# else
871 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
872 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
873# endif
874 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
875
876 /* Stop profiling and decrement lockers. */
877 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
878 ASMCompilerBarrier();
879 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
880 if (cLockers < 0)
881 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
882 else
883 {
884 /* Someone is waiting, wake up one of them. */
885 Assert(cLockers < _8K);
886 Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
887 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
888 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
889 AssertRC(rc);
890 }
891
892 /* Signal exit event. */
893 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
894 { /* likely */ }
895 else
896 {
897 Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
898 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
899 AssertRC(rc);
900 }
901
902 return VINF_SUCCESS;
903
904
905#elif defined(IN_RING0)
906 /*
907 * Ring-0: Try to leave for real; this depends on the host and context.
908 */
909 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
910 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
911 PVMCPUCC pVCpu = VMMGetCpu(pVM);
912 bool fQueueOnTrouble = false; /* Set this to true to test queueing. */
913 if ( pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
914 || VMMRZCallRing3IsEnabled(pVCpu)
915 || RTSemEventIsSignalSafe()
916 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
917 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
918 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
919 || (fQueueOnTrouble = ( hEventToSignal == NIL_SUPSEMEVENT
920 && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
921 {
922 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
923
924# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
925 //pCritSect->s.Core.cNestings = 0; /* not really needed */
926 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
927# else
928 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
929 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
930# endif
931 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
932
933 /*
934 * Stop profiling and decrement lockers.
935 */
936 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
937 ASMCompilerBarrier();
938
939 bool fQueueIt = false;
940 int32_t cLockers;
941 if (!fQueueOnTrouble)
942 cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
943 else
944 {
945 cLockers = -1;
946 if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
947 fQueueIt = true;
948 }
949 if (!fQueueIt)
950 {
951 VMMR0EMTBLOCKCTX Ctx;
952 bool fLeaveCtx = false;
953 if (cLockers < 0)
954 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
955 else
956 {
957 /* Someone is waiting, wake up one of them. */
958 Assert(cLockers < _8K);
959 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
960 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
961 {
962 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
963 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
964 fLeaveCtx = true;
965 }
966 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
967 AssertRC(rc);
968 }
969
970 /*
971 * Signal exit event.
972 */
973 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
974 { /* likely */ }
975 else
976 {
977 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
978 {
979 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
980 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
981 fLeaveCtx = true;
982 }
983 Log8(("Signalling %#p\n", hEventToSignal));
984 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
985 AssertRC(rc);
986 }
987
988 /*
989 * Restore HM context if needed.
990 */
991 if (!fLeaveCtx)
992 { /* contention should be unlikely */ }
993 else
994 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
995
996# ifdef DEBUG_bird
997 VMMTrashVolatileXMMRegs();
998# endif
999 return VINF_SUCCESS;
1000 }
1001
1002 /*
1003 * Darn, someone raced in on us. Restore the state (this works only
1004 * because the semaphore is effectively controlling ownership).
1005 */
1006 bool fRc;
1007 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1008 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1009 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1010 pdmCritSectCorrupted(pCritSect, "owner race"));
1011 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1012# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1013 //pCritSect->s.Core.cNestings = 1;
1014 Assert(pCritSect->s.Core.cNestings == 1);
1015# else
1016 //Assert(pCritSect->s.Core.cNestings == 0);
1017 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1018# endif
1019 Assert(hEventToSignal == NIL_SUPSEMEVENT);
1020 }
1021
1022
1023#else /* IN_RC */
1024 /*
1025 * Raw-mode: Try to leave it.
1026 */
1027# error "This context is not use..."
1028 if (pCritSect->s.Core.cLockers == 0)
1029 {
1030# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1031 //pCritSect->s.Core.cNestings = 0; /* not really needed */
1032# else
1033 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
1034# endif
1035 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1036 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
1037
1038 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
1039 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
1040 return VINF_SUCCESS;
1041
1042 /*
1043 * Darn, someone raced in on us. Restore the state (this works only
1044 * because the semaphore is effectively controlling ownership).
1045 */
1046 bool fRc;
1047 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1048 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1049 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1050 pdmCritSectCorrupted(pCritSect, "owner race"));
1051 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1052# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1053 //pCritSect->s.Core.cNestings = 1;
1054 Assert(pCritSect->s.Core.cNestings == 1);
1055# else
1056 //Assert(pCritSect->s.Core.cNestings == 0);
1057 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1058# endif
1059 }
1060#endif /* IN_RC */
1061
1062
1063#ifndef IN_RING3
1064 /*
1065 * Ring-0/raw-mode: Unable to leave. Queue the leave for ring-3.
1066 */
1067 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1068# ifndef IN_RING0
1069 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1070# endif
1071 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
1072 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
1073 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves), ("%d\n", i), VERR_PDM_CRITSECT_IPE);
1074 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
1075 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1076 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectLeaves[i])
1077 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectLeaves[i] & HOST_PAGE_OFFSET_MASK)
1078 == ((uintptr_t)pCritSect & HOST_PAGE_OFFSET_MASK),
1079 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectLeaves[i], pCritSect),
1080 pdmCritSectCorrupted(pCritSect, "Invalid pSelfR3 value"));
1081 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
1082 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* unnecessary paranoia */
1083 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1084 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
1085
1086 return VINF_SUCCESS;
1087#endif /* IN_RING3 */
1088}
1089
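/*
 * Nesting sketch (illustrative; error handling elided): every successful enter
 * must be paired with a leave.  Only the outermost leave really releases the
 * section and returns VINF_SUCCESS; the inner ones return VINF_SEM_NESTED.
 *
 *     PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);    // outer
 *     PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);    // nested
 *     int rc1 = PDMCritSectLeave(pVM, &pThis->CritSect);         // VINF_SEM_NESTED
 *     int rc2 = PDMCritSectLeave(pVM, &pThis->CritSect);         // VINF_SUCCESS
 */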
1090
1091#if defined(IN_RING0) || defined(IN_RING3)
1092/**
1093 * Schedules an event semaphore for signalling upon critsect exit.
1094 *
1095 * @returns VINF_SUCCESS on success.
1096 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
1097 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
1098 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
1099 *
1100 * @param pCritSect The critical section.
1101 * @param hEventToSignal The support driver event semaphore that should be
1102 * signalled.
1103 */
1104VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
1105{
1106 AssertPtr(pCritSect);
1107 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
1108 Assert(hEventToSignal != NIL_SUPSEMEVENT);
1109# ifdef IN_RING3
1110 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
1111 return VERR_NOT_OWNER;
1112# endif
1113 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
1114 || pCritSect->s.hEventToSignal == hEventToSignal))
1115 {
1116 pCritSect->s.hEventToSignal = hEventToSignal;
1117 return VINF_SUCCESS;
1118 }
1119 return VERR_TOO_MANY_SEMAPHORES;
1120}
1121#endif /* IN_RING0 || IN_RING3 */
1122
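/*
 * Usage sketch (illustrative; the calling context and pThis->CritSect are
 * assumptions): while owning the section, schedule a support driver event so
 * that the following PDMCritSectLeave() signals it on exit.
 *
 *     SUPSEMEVENT hEvt = NIL_SUPSEMEVENT;
 *     int rc = SUPSemEventCreate(pVM->pSession, &hEvt);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hEvt);
 *         AssertRC(rc);
 *         PDMCritSectLeave(pVM, &pThis->CritSect);   // signals hEvt when leaving for real
 *     }
 */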
1123
1124/**
1125 * Checks the caller is the owner of the critical section.
1126 *
1127 * @returns true if owner.
1128 * @returns false if not owner.
1129 * @param pVM The cross context VM structure.
1130 * @param pCritSect The critical section.
1131 */
1132VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
1133{
1134#ifdef IN_RING3
1135 RT_NOREF(pVM);
1136 return RTCritSectIsOwner(&pCritSect->s.Core);
1137#else
1138 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1139 if ( !pVCpu
1140 || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1141 return false;
1142 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1143 || pCritSect->s.Core.cNestings > 1;
1144#endif
1145}
1146
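/*
 * Typical use (sketch; pThis->CritSect is hypothetical): assert lock ownership
 * on paths that assume the section is already held by the calling thread.
 *
 *     Assert(PDMCritSectIsOwner(pVM, &pThis->CritSect));
 */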
1147
1148/**
1149 * Checks the specified VCPU is the owner of the critical section.
1150 *
1151 * @returns true if owner.
1152 * @returns false if not owner.
1153 * @param pVCpu The cross context virtual CPU structure.
1154 * @param pCritSect The critical section.
1155 */
1156VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
1157{
1158#ifdef IN_RING3
1159 NOREF(pVCpu);
1160 return RTCritSectIsOwner(&pCritSect->s.Core);
1161#else
1162 Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
1163 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1164 return false;
1165 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1166 || pCritSect->s.Core.cNestings > 1;
1167#endif
1168}
1169
1170
1171/**
1172 * Checks if anyone is waiting on the critical section we own.
1173 *
1174 * @returns true if someone is waiting.
1175 * @returns false if no one is waiting.
1176 * @param pVM The cross context VM structure.
1177 * @param pCritSect The critical section.
1178 */
1179VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
1180{
1181 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
1182 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
1183 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
1184}
1185
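/*
 * Sketch of a cooperative long-running owner (illustrative; whether this is a
 * good idea depends on the caller): briefly drop and re-take the section when
 * others are queued behind us.
 *
 *     if (PDMCritSectHasWaiters(pVM, &pThis->CritSect))
 *     {
 *         PDMCritSectLeave(pVM, &pThis->CritSect);
 *         int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_IGNORED);
 *         AssertReleaseRC(rc);
 *     }
 */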
1186
1187/**
1188 * Checks if a critical section is initialized or not.
1189 *
1190 * @returns true if initialized.
1191 * @returns false if not initialized.
1192 * @param pCritSect The critical section.
1193 */
1194VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
1195{
1196 return RTCritSectIsInitialized(&pCritSect->s.Core);
1197}
1198
1199
1200/**
1201 * Gets the recursion depth.
1202 *
1203 * @returns The recursion depth.
1204 * @param pCritSect The critical section.
1205 */
1206VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
1207{
1208 return RTCritSectGetRecursion(&pCritSect->s.Core);
1209}
1210