VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@90347

Last change on this file since 90347 was 90346, checked in by vboxsync, 3 years ago
  • VMM: Pass pVM to PDMCritSect APIs. bugref:9218 bugref:10074
  • DrvNetShaper: Do bandwidth allocation via PDMDrvHlp. bugref:10074
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 30.7 KB
1/* $Id: PDMAllCritSect.cpp 90346 2021-07-26 19:55:53Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** The number of loops to spin for in ring-3. */
48#define PDMCRITSECT_SPIN_COUNT_R3 20
49/** The number of loops to spin for in ring-0. */
50#define PDMCRITSECT_SPIN_COUNT_R0 256
51/** The number of loops to spin for in the raw-mode context. */
52#define PDMCRITSECT_SPIN_COUNT_RC 256
53
54
55/** Skips some of the overly paranoid atomic updates.
56 * Makes some assumptions about cache coherence, though not brave enough not to
57 * always end with an atomic update. */
58#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
59
60/* Undefine the automatic VBOX_STRICT API mappings. */
61#undef PDMCritSectEnter
62#undef PDMCritSectTryEnter
63
64
65/**
66 * Gets the ring-3 native thread handle of the calling thread.
67 *
68 * @returns native thread handle (ring-3).
69 * @param pVM The cross context VM structure.
70 * @param pCritSect The critical section. This is used in R0 and RC.
71 */
72DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
73{
74#ifdef IN_RING3
75 RT_NOREF(pVM, pCritSect);
76 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
77#else
78 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
79 NIL_RTNATIVETHREAD);
80 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
81 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
82#endif
83 return hNativeSelf;
84}
85
86
87/**
88 * Tail code called when we've won the battle for the lock.
89 *
90 * @returns VINF_SUCCESS.
91 *
92 * @param pCritSect The critical section.
93 * @param hNativeSelf The native handle of this thread.
94 * @param pSrcPos The source position of the lock operation.
95 */
96DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
97{
98 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
99 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
100
101# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
102 pCritSect->s.Core.cNestings = 1;
103# else
104 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
105# endif
106 Assert(pCritSect->s.Core.cNestings == 1);
107 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
108
109# ifdef PDMCRITSECT_STRICT
110 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
111# else
112 NOREF(pSrcPos);
113# endif
114
115 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
116 return VINF_SUCCESS;
117}
118
119
120#if defined(IN_RING3) || defined(IN_RING0)
121/**
122 * Deals with the contended case in ring-3 and ring-0.
123 *
124 * @retval VINF_SUCCESS on success.
125 * @retval VERR_SEM_DESTROYED if destroyed.
126 *
127 * @param pVM The cross context VM structure.
128 * @param pCritSect The critsect.
129 * @param hNativeSelf The native thread handle.
130 * @param pSrcPos The source position of the lock operation.
131 */
132static int pdmR3R0CritSectEnterContended(PVMCC pVM, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
133{
134 /*
135 * Start waiting.
136 */
137 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
138 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
139# ifdef IN_RING3
140 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
141# else
142 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
143# endif
144
145 /*
146 * The wait loop.
147 */
148 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
149 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
150# ifdef IN_RING3
151# ifdef PDMCRITSECT_STRICT
152 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
153 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
154 if (RT_FAILURE(rc2))
155 return rc2;
156# else
157 RTTHREAD hThreadSelf = RTThreadSelf();
158# endif
159# endif
160 for (;;)
161 {
162 /*
163 * Do the wait.
164 *
165 * In ring-3 this gets cluttered by lock validation and thread state
166 * maintenance.
167 *
168 * In ring-0 we have to deal with the possibility that the thread has
169 * been signalled and that the interruptible wait function returns
170 * immediately. In that case we do normal R0/RC rcBusy handling.
171 */
172# ifdef IN_RING3
173# ifdef PDMCRITSECT_STRICT
174 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
175 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
176 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
177 if (RT_FAILURE(rc9))
178 return rc9;
179# else
180 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
181# endif
182 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
183 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
184# else /* IN_RING0 */
185 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
186# endif /* IN_RING0 */
187
188 /*
189 * Deal with the return code and critsect destruction.
190 */
191 if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
192 return VERR_SEM_DESTROYED;
193 if (rc == VINF_SUCCESS)
194 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
195 AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
196
197# ifdef IN_RING0
198 /* Something is pending (signal, APC, debugger, whatever), just go back
199 to ring-3 so the kernel can deal with it when leaving kernel context.
200
201 Note! We've incremented cLockers already and cannot safely decrement
202 it without creating a race with PDMCritSectLeave, resulting in
203 spurious wakeups. */
204 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
205 rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
206 AssertRC(rc);
207# else
208 RT_NOREF(pVM);
209# endif
210 }
211 /* won't get here */
212}
213#endif /* IN_RING3 || IN_RING0 */
214
215
216/**
217 * Common worker for the debug and normal APIs.
218 *
219 * @returns VINF_SUCCESS if entered successfully.
220 * @returns rcBusy when encountering a busy critical section in GC/R0.
221 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
222 * during the operation.
223 *
224 * @param pVM The cross context VM structure.
225 * @param pCritSect The PDM critical section to enter.
226 * @param rcBusy The status code to return when we're in GC or R0
227 * @param pSrcPos The source position of the lock operation.
228 */
229DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
230{
231 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
232 Assert(pCritSect->s.Core.cNestings >= 0);
233
234 /*
235 * If the critical section has already been destroyed, then inform the caller.
236 */
237 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
238 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
239 VERR_SEM_DESTROYED);
240
241 /*
242 * See if we're lucky.
243 */
244 /* NOP ... */
245 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
246 { /* We're more likely to end up here with real critsects than a NOP one. */ }
247 else
248 return VINF_SUCCESS;
249
250 Assert(pCritSect->s.CTX_SUFF(pVM) == pVM); RT_NOREF(pVM);
251 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
252 /* ... not owned ... */
253 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
254 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
255
256 /* ... or nested. */
257 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
258 {
259 Assert(pCritSect->s.Core.cNestings >= 1);
260# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
261 pCritSect->s.Core.cNestings += 1;
262# else
263 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
264# endif
265 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
266 return VINF_SUCCESS;
267 }
268
269 /*
270 * Spin for a bit without incrementing the counter.
271 */
272 /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
273 * cpu systems. */
274 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
275 while (cSpinsLeft-- > 0)
276 {
277 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
278 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
279 ASMNopPause();
280 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
281 cli'ed pending-preemption check up front using sti w/ instruction fusing
282 for avoiding races. Hmm ... This is assuming the other party is actually
283 executing code on another CPU ... which we could keep track of if we
284 wanted. */
285 }
286
287#ifdef IN_RING3
288 /*
289 * Take the slow path.
290 */
291 NOREF(rcBusy);
292 return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
293
294#else
295# ifdef IN_RING0
296 /*
297 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
298 * account when waiting on contended locks.
299 *
300 * While we usually (it can be VINF_SUCCESS) have the option, via the rcBusy
301 * parameter, of going back to ring-3 to restart the work there, it's
302 * almost always more efficient to try to wait for the lock here. The rcBusy
303 * status will be used if we encounter a VERR_INTERRUPTED situation though.
304 *
305 * We must never block if VMMRZCallRing3Disable is active.
306 */
307
308 /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
309 * and would be better off switching out of that while waiting for
310 * the lock. Several of the locks jump back to ring-3 just to
311 * get the lock; the ring-3 code will then call the kernel to do
312 * the lock wait and when the call returns it will call ring-0
313 * again and resume in setjmp style. Not very efficient. */
314# if 0
315 if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
316 * callers not prepared for longjmp/blocking to
317 * use PDMCritSectTryEnter. */
318 {
319 /*
320 * Leave HM context while waiting if necessary.
321 */
322 int rc;
323 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
324 {
325 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
326 rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
327 }
328 else
329 {
330 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
331 PVMCC pVM = pCritSect->s.CTX_SUFF(pVM);
332 PVMCPUCC pVCpu = VMMGetCpu(pVM);
333 HMR0Leave(pVM, pVCpu);
334 RTThreadPreemptRestore(NIL_RTTHREAD, XXX);
335
336 rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
337
338 RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
339 HMR0Enter(pVM, pVCpu);
340 }
341 return rc;
342 }
343# else
344 /*
345 * If preemption hasn't been disabled, we can block here in ring-0.
346 */
347 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
348 && ASMIntAreEnabled())
349 return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
350# endif
351# endif /* IN_RING0 */
352
353 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
354
355 /*
356 * Call ring-3 to acquire the critical section?
357 */
358 if (rcBusy == VINF_SUCCESS)
359 {
360 PVMCPUCC pVCpu = VMMGetCpu(pVM);
361 AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
362 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
363 }
364
365 /*
366 * Return busy.
367 */
368 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
369 return rcBusy;
370#endif /* !IN_RING3 */
371}
372
373
374/**
375 * Enters a PDM critical section.
376 *
377 * @returns VINF_SUCCESS if entered successfully.
378 * @returns rcBusy when encountering a busy critical section in RC/R0.
379 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
380 * during the operation.
381 *
382 * @param pVM The cross context VM structure.
383 * @param pCritSect The PDM critical section to enter.
384 * @param rcBusy The status code to return when we're in RC or R0
385 * and the section is busy. Pass VINF_SUCCESS to
386 * acquire the critical section thru a ring-3
387 * call if necessary.
388 */
389VMMDECL(int) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
390{
391#ifndef PDMCRITSECT_STRICT
392 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
393#else
394 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
395 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
396#endif
397}
398
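/*
 * Illustrative sketch, not part of the original file: a minimal example of
 * how device/driver code typically brackets access to shared state with
 * PDMCritSectEnter/PDMCritSectLeave.  The DEVSAMPLE state type and its
 * members are assumptions made up for this example only.
 */
typedef struct DEVSAMPLE
{
    PDMCRITSECT CritSect;         /**< Protects uSharedCounter. */
    uint32_t    uSharedCounter;   /**< Some shared device state. */
} DEVSAMPLE;
typedef DEVSAMPLE *PDEVSAMPLE;

static int devSampleDoWork(PVMCC pVM, PDEVSAMPLE pThis)
{
    /* When the section is contended in ring-0/RC, this may return the rcBusy
       status (VERR_SEM_BUSY here) instead of blocking, so the caller can
       defer the work to ring-3; in ring-3 the call blocks until we own it. */
    int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        pThis->uSharedCounter++;  /* ... touch shared state while owning the lock ... */
        PDMCritSectLeave(pVM, &pThis->CritSect);
    }
    return rc;
}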
399
400/**
401 * Enters a PDM critical section, with location information for debugging.
402 *
403 * @returns VINF_SUCCESS if entered successfully.
404 * @returns rcBusy when encountering a busy critical section in RC/R0.
405 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
406 * during the operation.
407 *
408 * @param pVM The cross context VM structure.
409 * @param pCritSect The PDM critical section to enter.
410 * @param rcBusy The status code to return when we're in RC or R0
411 * and the section is busy. Pass VINF_SUCCESS to
412 * acquire the critical section thru a ring-3
413 * call if necessary.
414 * @param uId Some kind of locking location ID. Typically a
415 * return address up the stack. Optional (0).
416 * @param SRC_POS The source position where the lock is being
417 * acquired from. Optional.
418 */
419VMMDECL(int) PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
420{
421#ifdef PDMCRITSECT_STRICT
422 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
423 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
424#else
425 NOREF(uId); RT_SRC_POS_NOREF();
426 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
427#endif
428}
429
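/*
 * Illustrative sketch, an assumption rather than original code: the debug
 * variant is normally handed the caller's location (RT_SRC_POS) and return
 * address so lock validator and statistics output point at the real call
 * site.  devSampleEnterWithOrigin and PDEVSAMPLE are hypothetical.
 */
static int devSampleEnterWithOrigin(PVMCC pVM, PDEVSAMPLE pThis)
{
    return PDMCritSectEnterDebug(pVM, &pThis->CritSect, VERR_SEM_BUSY,
                                 (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
}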
430
431/**
432 * Common worker for the debug and normal APIs.
433 *
434 * @retval VINF_SUCCESS on success.
435 * @retval VERR_SEM_BUSY if the critsect was owned.
436 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
437 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
438 * during the operation.
439 *
440 * @param pVM The cross context VM structure.
441 * @param pCritSect The critical section.
442 * @param pSrcPos The source position of the lock operation.
443 */
444static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
445{
446 /*
447 * If the critical section has already been destroyed, then inform the caller.
448 */
449 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
450 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
451 VERR_SEM_DESTROYED);
452
453 /*
454 * See if we're lucky.
455 */
456 /* NOP ... */
457 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
458 { /* We're more likely to end up here with real critsects than a NOP one. */ }
459 else
460 return VINF_SUCCESS;
461
462 Assert(pCritSect->s.CTX_SUFF(pVM) == pVM);
463 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
464 /* ... not owned ... */
465 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
466 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
467
468 /* ... or nested. */
469 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
470 {
471 Assert(pCritSect->s.Core.cNestings >= 1);
472# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
473 pCritSect->s.Core.cNestings += 1;
474# else
475 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
476# endif
477 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
478 return VINF_SUCCESS;
479 }
480
481 /* no spinning */
482
483 /*
484 * Return busy.
485 */
486#ifdef IN_RING3
487 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
488#else
489 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
490#endif
491 LogFlow(("PDMCritSectTryEnter: locked\n"));
492 return VERR_SEM_BUSY;
493}
494
495
496/**
497 * Try enter a critical section.
498 *
499 * @retval VINF_SUCCESS on success.
500 * @retval VERR_SEM_BUSY if the critsect was owned.
501 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
502 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
503 * during the operation.
504 *
505 * @param pVM The cross context VM structure.
506 * @param pCritSect The critical section.
507 */
508VMMDECL(int) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
509{
510#ifndef PDMCRITSECT_STRICT
511 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
512#else
513 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
514 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
515#endif
516}
517
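/*
 * Illustrative sketch (assumption): PDMCritSectTryEnter from a path that
 * must not block.  On VERR_SEM_BUSY the work is simply skipped or deferred.
 * PDEVSAMPLE is the hypothetical state type from the earlier sketch.
 */
static bool devSampleTryBumpCounter(PVMCC pVM, PDEVSAMPLE pThis)
{
    int rc = PDMCritSectTryEnter(pVM, &pThis->CritSect);
    if (rc == VINF_SUCCESS)
    {
        pThis->uSharedCounter++;                    /* safe: we own the section */
        PDMCritSectLeave(pVM, &pThis->CritSect);
        return true;
    }
    Assert(rc == VERR_SEM_BUSY || rc == VERR_SEM_DESTROYED);
    return false;                                   /* busy: let the caller retry later */
}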
518
519/**
520 * Try enter a critical section, with location information for debugging.
521 *
522 * @retval VINF_SUCCESS on success.
523 * @retval VERR_SEM_BUSY if the critsect was owned.
524 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
525 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
526 * during the operation.
527 *
528 * @param pVM The cross context VM structure.
529 * @param pCritSect The critical section.
530 * @param uId Some kind of locking location ID. Typically a
531 * return address up the stack. Optional (0).
532 * @param SRC_POS The source position where the lock is being
533 * acquired from. Optional.
534 */
535VMMDECL(int) PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
536{
537#ifdef PDMCRITSECT_STRICT
538 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
539 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
540#else
541 NOREF(uId); RT_SRC_POS_NOREF();
542 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
543#endif
544}
545
546
547#ifdef IN_RING3
548/**
549 * Enters a PDM critical section.
550 *
551 * @returns VINF_SUCCESS if entered successfully.
552 * @returns rcBusy when encountering a busy critical section in GC/R0.
553 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
554 * during the operation.
555 *
556 * @param pVM The cross context VM structure.
557 * @param pCritSect The PDM critical section to enter.
558 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
559 */
560VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
561{
562 int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
563 if ( rc == VINF_SUCCESS
564 && fCallRing3
565 && pCritSect->s.Core.pValidatorRec
566 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
567 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
568 return rc;
569}
570#endif /* IN_RING3 */
571
572
573/**
574 * Leaves a critical section entered with PDMCritSectEnter().
575 *
576 * @returns Indication whether we really exited the critical section.
577 * @retval VINF_SUCCESS if we really exited.
578 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
579 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
580 *
581 * @param pVM The cross context VM structure.
582 * @param pCritSect The PDM critical section to leave.
583 */
584VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
585{
586 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
587 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
588
589 /* Check for NOP sections before asserting ownership. */
590 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
591 { /* We're more likely to end up here with real critsects than a NOP one. */ }
592 else
593 return VINF_SUCCESS;
594
595 /*
596 * Always check that the caller is the owner (screw performance).
597 */
598 Assert(pCritSect->s.CTX_SUFF(pVM) == pVM); RT_NOREF(pVM);
599 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
600 AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
601 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
602 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
603 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
604 VERR_NOT_OWNER);
605
606 /*
607 * Nested leave.
608 */
609 int32_t const cNestings = pCritSect->s.Core.cNestings;
610 Assert(cNestings >= 1);
611 if (cNestings > 1)
612 {
613# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
614 pCritSect->s.Core.cNestings = cNestings - 1;
615# else
616 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
617# endif
618 ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
619 Assert(pCritSect->s.Core.cLockers >= 0);
620 return VINF_SEM_NESTED;
621 }
622
623#ifdef IN_RING0
624# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
625 if (1) /* SUPSemEventSignal is safe */
626# else
627 if (ASMIntAreEnabled())
628# endif
629#endif
630#if defined(IN_RING3) || defined(IN_RING0)
631 {
632 /*
633 * Leave for real.
634 */
635 /* update members. */
636 SUPSEMEVENT hEventToSignal = pCritSect->s.hEventToSignal;
637 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
638# ifdef IN_RING3
639# if defined(PDMCRITSECT_STRICT)
640 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
641 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
642# endif
643 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
644# endif
645# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
646 //pCritSect->s.Core.cNestings = 0; /* not really needed */
647 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
648# else
649 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
650 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
651# endif
652 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
653
654 /* stop and decrement lockers. */
655 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
656 ASMCompilerBarrier();
657 if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) < 0)
658 { /* hopefully likely */ }
659 else
660 {
661 /* Someone is waiting, wake up one of them. */
662 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
663 PSUPDRVSESSION pSession = pVM->pSession;
664 int rc = SUPSemEventSignal(pSession, hEvent);
665 AssertRC(rc);
666 }
667
668 /* Signal exit event. */
669 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
670 { /* likely */ }
671 else
672 {
673 Log8(("Signalling %#p\n", hEventToSignal));
674 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
675 AssertRC(rc);
676 }
677
678# if defined(DEBUG_bird) && defined(IN_RING0)
679 VMMTrashVolatileXMMRegs();
680# endif
681 }
682#endif /* IN_RING3 || IN_RING0 */
683#ifdef IN_RING0
684 else
685#endif
686#if defined(IN_RING0) || defined(IN_RC)
687 {
688 /*
689 * Try leave it.
690 */
691 if (pCritSect->s.Core.cLockers == 0)
692 {
693# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
694 //pCritSect->s.Core.cNestings = 0; /* not really needed */
695# else
696 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
697# endif
698 RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
699 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
700 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
701
702 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
703 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
704 return VINF_SUCCESS;
705
706 /* darn, someone raced in on us. */
707 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
708 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
709# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
710 //pCritSect->s.Core.cNestings = 1;
711 Assert(pCritSect->s.Core.cNestings == 1);
712# else
713 //Assert(pCritSect->s.Core.cNestings == 0);
714 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
715# endif
716 }
717 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
718
719 /*
720 * Queue the request.
721 */
722 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
723 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
724 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
725 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
726 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
727 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
728 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
729 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
730 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
731 }
732#endif /* IN_RING0 || IN_RC */
733
734 return VINF_SUCCESS;
735}
736
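/*
 * Illustrative sketch (assumption, ring-3 usage): the owner may re-enter the
 * section; only the final leave really releases it (and may wake a waiter),
 * while the inner leave just returns VINF_SEM_NESTED.
 */
static void devSampleNestingExample(PVMCC pVM, PDEVSAMPLE pThis)
{
    int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);  /* depth 1 */
    AssertRC(rc);
    rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);      /* depth 2, owner re-enters */
    AssertRC(rc);

    rc = PDMCritSectLeave(pVM, &pThis->CritSect);
    Assert(rc == VINF_SEM_NESTED);                   /* still owned, back to depth 1 */

    rc = PDMCritSectLeave(pVM, &pThis->CritSect);
    Assert(rc == VINF_SUCCESS);                      /* really released now */
    RT_NOREF(rc);
}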
737
738#if defined(IN_RING0) || defined(IN_RING3)
739/**
740 * Schedule an event semaphore for signalling upon critsect exit.
741 *
742 * @returns VINF_SUCCESS on success.
743 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
744 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
745 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
746 *
747 * @param pCritSect The critical section.
748 * @param hEventToSignal The support driver event semaphore that should be
749 * signalled.
750 */
751VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
752{
753 AssertPtr(pCritSect);
754 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
755 Assert(hEventToSignal != NIL_SUPSEMEVENT);
756# ifdef IN_RING3
757 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
758 return VERR_NOT_OWNER;
759# endif
760 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
761 || pCritSect->s.hEventToSignal == hEventToSignal))
762 {
763 pCritSect->s.hEventToSignal = hEventToSignal;
764 return VINF_SUCCESS;
765 }
766 return VERR_TOO_MANY_SEMAPHORES;
767}
768#endif /* IN_RING0 || IN_RING3 */
769
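/*
 * Illustrative sketch (assumption, ring-3/ring-0 only): instead of signalling
 * an event while still holding the lock, schedule it so PDMCritSectLeave
 * signals it after the section has been released.  hEvtComplete is a
 * hypothetical caller-owned SUPSEMEVENT.
 */
static int devSampleCompleteUnderLock(PVMCC pVM, PDEVSAMPLE pThis, SUPSEMEVENT hEvtComplete)
{
    int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        pThis->uSharedCounter = 0;                   /* ... update shared state ... */
        rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hEvtComplete);
        PDMCritSectLeave(pVM, &pThis->CritSect);     /* hEvtComplete is signalled here */
    }
    return rc;
}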
770
771/**
772 * Checks whether the caller is the owner of the critical section.
773 *
774 * @returns true if owner.
775 * @returns false if not owner.
776 * @param pVM The cross context VM structure.
777 * @param pCritSect The critical section.
778 */
779VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
780{
781#ifdef IN_RING3
782 RT_NOREF(pVM);
783 return RTCritSectIsOwner(&pCritSect->s.Core);
784#else
785 PVMCPUCC pVCpu = VMMGetCpu(pVM);
786 if ( !pVCpu
787 || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
788 return false;
789 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
790 || pCritSect->s.Core.cNestings > 1;
791#endif
792}
793
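/*
 * Illustrative sketch (assumption): PDMCritSectIsOwner used to assert a
 * locking precondition in an internal helper that requires the caller to
 * already hold the section.
 */
static void devSampleUpdateLocked(PVMCC pVM, PDEVSAMPLE pThis, uint32_t uValue)
{
    Assert(PDMCritSectIsOwner(pVM, &pThis->CritSect));   /* caller must own the lock */
    pThis->uSharedCounter = uValue;
}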
794
795/**
796 * Checks whether the specified VCPU is the owner of the critical section.
797 *
798 * @returns true if owner.
799 * @returns false if not owner.
800 * @param pVCpu The cross context virtual CPU structure.
801 * @param pCritSect The critical section.
802 */
803VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
804{
805#ifdef IN_RING3
806 NOREF(pVCpu);
807 return RTCritSectIsOwner(&pCritSect->s.Core);
808#else
809 Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
810 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
811 return false;
812 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
813 || pCritSect->s.Core.cNestings > 1;
814#endif
815}
816
817
818/**
819 * Checks if anyone is waiting on the critical section we own.
820 *
821 * @returns true if someone is waiting.
822 * @returns false if no one is waiting.
823 * @param pVM The cross context VM structure.
824 * @param pCritSect The critical section.
825 */
826VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
827{
828 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
829 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
830 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
831}
832
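/*
 * Illustrative sketch (assumption): a long-running owner can poll
 * PDMCritSectHasWaiters and briefly drop the lock to give contending
 * threads a chance to run.
 */
static void devSampleDrainWork(PVMCC pVM, PDEVSAMPLE pThis)
{
    int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
    AssertRCReturnVoid(rc);
    while (pThis->uSharedCounter > 0)
    {
        pThis->uSharedCounter--;                         /* one unit of work under the lock */
        if (PDMCritSectHasWaiters(pVM, &pThis->CritSect))
        {
            PDMCritSectLeave(pVM, &pThis->CritSect);     /* let a waiter in */
            rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
            AssertRCReturnVoid(rc);
        }
    }
    PDMCritSectLeave(pVM, &pThis->CritSect);
}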
833
834/**
835 * Checks if a critical section is initialized or not.
836 *
837 * @returns true if initialized.
838 * @returns false if not initialized.
839 * @param pCritSect The critical section.
840 */
841VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
842{
843 return RTCritSectIsInitialized(&pCritSect->s.Core);
844}
845
846
847/**
848 * Gets the recursion depth.
849 *
850 * @returns The recursion depth.
851 * @param pCritSect The critical section.
852 */
853VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
854{
855 return RTCritSectGetRecursion(&pCritSect->s.Core);
856}
857