VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp @ 90348

Last change on this file since 90348 was 90348, checked in by vboxsync, 4 years ago

VMM: Removed the VM pointers from the internal critsect structures. bugref:9218 bugref:10074

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 30.6 KB
/* $Id: PDMAllCritSect.cpp 90348 2021-07-26 21:01:38Z vboxsync $ */
/** @file
 * PDM - Write-Only Critical Section, All Contexts.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/** Skips some of the overly paranoid atomic updates.
 * Makes some assumptions about cache coherence, though not brave enough not to
 * always end with an atomic update. */
#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    RT_NOREF(pVM, pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVMCPUCC        pVCpu       = VMMGetCpu(pVM);                               AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 * @param   pSrcPos         The source position of the lock operation.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    pCritSect->s.Core.cNestings = 1;
# else
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 * @param   pSrcPos     The source position of the lock operation.
 */
static int pdmR3R0CritSectEnterContended(PVMCC pVM, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pVM->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else  /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));

# ifdef IN_RING0
        /* Something is pending (signal, APC, debugger, whatever), just go back
           to ring-3 so the kernel can deal with it when leaving kernel context.

           Note! We've incremented cLockers already and cannot safely decrement
                 it without creating a race with PDMCritSectLeave, resulting in
                 spurious wakeups. */
        PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
        AssertRC(rc);
# else
        RT_NOREF(pVM);
# endif
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   pSrcPos     The source position of the lock operation.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        Assert(pCritSect->s.Core.cNestings >= 1);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings += 1;
# else
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
# endif
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /*
     * In ring-0 context we have to take the special VT-x/AMD-V HM context into
     * account when waiting on contended locks.
     *
     * While we usually (it can be VINF_SUCCESS) have the option, via the rcBusy
     * parameter, of going back to ring-3 and restarting the work there, it's
     * almost always more efficient to try to wait for the lock here.  The rcBusy
     * will be used if we encounter a VERR_INTERRUPTED situation though.
     *
     * We must never block if VMMRZCallRing3Disable is active.
     */

    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVMCC    pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPUCC pVCpu = VMMGetCpu(pVM);
            HMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, XXX);

            rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
            HMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
#  endif
# endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVMCPUCC pVCpu = VMMGetCpu(pVM);
        AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section thru a ring-3
 *                      call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
#endif
}
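
/* Usage sketch (illustrative only, not taken from the VirtualBox sources): a
 * typical caller picks an rcBusy status so that RC/R0 code can defer the work
 * when the section is contended, while ring-3 simply blocks.  The pThis and
 * pThis->CritSect names are hypothetical device-state members.
 *
 *     int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
 *     if (rc == VERR_SEM_BUSY)
 *         return rc;          // contended in RC/R0; redo the operation in ring-3
 *     AssertRC(rc);
 *     // ... access the state protected by the section ...
 *     PDMCritSectLeave(pVM, &pThis->CritSect);
 */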


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section thru a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   SRC_POS     The source position where the lock is being
 *                      acquired from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
#endif
}
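
/* Usage sketch (illustrative only): the debug variant feeds the call site to
 * the lock validator in strict builds.  Per the doc above, uId is optional
 * (0) and RT_SRC_POS supplies the caller's file, line and function.  The
 * pThis->CritSect member is hypothetical.
 *
 *     int rc = PDMCritSectEnterDebug(pVM, &pThis->CritSect, VERR_SEM_BUSY,
 *                                    0, RT_SRC_POS);
 */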


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the lock operation.
 */
static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        Assert(pCritSect->s.Core.cNestings >= 1);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings += 1;
# else
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
# endif
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pVM, pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
#endif
}
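
/* Usage sketch (illustrative only): try-enter is the non-blocking variant for
 * paths that must not wait; a VERR_SEM_BUSY outcome is simply skipped or
 * retried later.  The pThis->CritSect member is hypothetical.
 *
 *     int rc = PDMCritSectTryEnter(pVM, &pThis->CritSect);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... brief work under the lock ...
 *         PDMCritSectLeave(pVM, &pThis->CritSect);
 *     }
 *     // else: VERR_SEM_BUSY (or VERR_SEM_DESTROYED), defer the work.
 */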


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   SRC_POS     The source position where the lock is being
 *                      acquired from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pVM, pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
    { /* We're more likely to end up here with real critsects than a NOP one. */ }
    else
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);

    /*
     * Nested leave.
     */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    Assert(cNestings >= 1);
    if (cNestings > 1)
    {
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        pCritSect->s.Core.cNestings = cNestings - 1;
# else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
# endif
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
        SUPSEMEVENT hEventToSignal = pCritSect->s.hEventToSignal;
        pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
# ifdef IN_RING3
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
        //pCritSect->s.Core.cNestings = 0; /* not really needed */
        pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
# else
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) < 0)
        { /* hopefully likely */ }
        else
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pVM->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

        /* Signal exit event. */
        if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
        { /* likely */ }
        else
        {
            Log8(("Signalling %#p\n", hEventToSignal));
            int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
            AssertRC(rc);
        }

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
            //pCritSect->s.Core.cNestings = 0; /* not really needed */
# else
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
# endif
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
            //pCritSect->s.Core.cNestings = 1;
            Assert(pCritSect->s.Core.cNestings == 1);
# else
            //Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
# endif
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
        uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
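
/* Usage sketch (illustrative only): the section recurses, so a leave only
 * drops ownership when it balances the outermost enter; inner leaves return
 * VINF_SEM_NESTED as implemented above.  pThis->CritSect is hypothetical.
 *
 *     PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);    // outermost
 *     PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);    // nested
 *     int rc1 = PDMCritSectLeave(pVM, &pThis->CritSect);         // VINF_SEM_NESTED
 *     int rc2 = PDMCritSectLeave(pVM, &pThis->CritSect);         // VINF_SUCCESS, really left
 */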


#if defined(IN_RING0) || defined(IN_RING3)
/**
 * Schedule an event semaphore for signalling upon critsect exit.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect       The critical section.
 * @param   hEventToSignal  The support driver event semaphore that should be
 *                          signalled.
 */
VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
{
    AssertPtr(pCritSect);
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
    Assert(hEventToSignal != NIL_SUPSEMEVENT);
# ifdef IN_RING3
    if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
        return VERR_NOT_OWNER;
# endif
    if (RT_LIKELY(   pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
                  || pCritSect->s.hEventToSignal == hEventToSignal))
    {
        pCritSect->s.hEventToSignal = hEventToSignal;
        return VINF_SUCCESS;
    }
    return VERR_TOO_MANY_SEMAPHORES;
}
#endif /* IN_RING0 || IN_RING3 */
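
/* Usage sketch (illustrative only): an owner that needs to signal an event
 * can schedule it here so it is raised by the leave path, after the lock has
 * been dropped.  hMyEvent and pThis are hypothetical.
 *
 *     int rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hMyEvent);
 *     AssertRC(rc);   // VERR_TOO_MANY_SEMAPHORES if a different event is already queued
 *     PDMCritSectLeave(pVM, &pThis->CritSect);   // hMyEvent gets signalled here
 */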


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    RT_NOREF(pVM);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (   !pVCpu
        || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}
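
/* Usage sketch (illustrative only): helpers that require the caller to hold
 * the section commonly assert ownership on entry.  pThis is hypothetical.
 *
 *     Assert(PDMCritSectIsOwner(pVM, &pThis->CritSect));
 */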


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pVM         The cross context VM structure.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}
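
/* Usage sketch (illustrative only): an owner can use this to cut non-urgent
 * work short when another thread is queued on the section.  pThis is
 * hypothetical.
 *
 *     if (PDMCritSectHasWaiters(pVM, &pThis->CritSect))
 *         break;   // wrap up and let the waiter in
 */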


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}
