VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp @ 20776

Last change on this file since 20776 was 20755, checked in by vboxsync, 16 years ago

PDMAllCritSect: try some serious paranoia and see if it makes any changes for #3992 (it should not, if it does, then we are in trouble in IPRT).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.8 KB
/* $Id: PDMAllCritSect.cpp 20755 2009-06-21 23:41:32Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

/** @def PDMCRITSECT_STRICT
 * Enables/disables PDM critsect strictness like deadlock detection. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif

/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM            pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU         pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}

/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    pCritSect->s.Core.Strict.pszEnterFile = NULL;
    pCritSect->s.Core.Strict.u32EnterLine = 0;
    pCritSect->s.Core.Strict.uEnterId     = 0;
    RTTHREAD hSelf = RTThreadSelf();
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
    RTThreadWriteLockInc(hSelf);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}

#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */

/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        /** @todo need pause/nop instruction here! */
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
         *        cli'ed pendingpreemption check up front using sti w/ instruction fusing
         *        for avoiding races. Hmm ... This is assuming the other party is actually
         *        executing code on another CPU... */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}

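/*
 * Illustrative usage sketch, not part of the original file: a caller in R0/RC
 * passes an rcBusy status it can recover from, typically deferring the access
 * to ring-3 when the section is contended. The helper name and the exact
 * rcBusy value below are made-up placeholders.
 */
#if 0
static int myDevReadReg(PPDMCRITSECT pCritSect, uint32_t *pu32Value)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY /* rcBusy for GC/R0 */);
    if (rc != VINF_SUCCESS)
        return rc;              /* rcBusy or VERR_SEM_DESTROYED. */
    *pu32Value = 0;             /* ... read the state guarded by the section ... */
    PDMCritSectLeave(pCritSect);
    return VINF_SUCCESS;
}
#endif
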
/**
 * Try to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if a nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}

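/*
 * Illustrative sketch, not part of the original file: try-enter suits
 * opportunistic paths that must not block; VERR_SEM_BUSY simply means
 * "skip it this time". The helper name below is a made-up placeholder.
 */
#if 0
static void myDevSampleCounters(PPDMCRITSECT pCritSect)
{
    if (PDMCritSectTryEnter(pCritSect) == VINF_SUCCESS)
    {
        /* ... sample the counters guarded by the section ... */
        PDMCritSectLeave(pCritSect);
    }
    /* else: somebody else owns it; try again on the next poll. */
}
#endif
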
#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallHost   Whether this is a VMMGCCallHost() or VMMR0CallHost() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallHost)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallHost
        &&  pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */

/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#if defined(IN_RING3) || defined(IN_RING0)
    /*
     * Leave for real.
     */
    /* update members. */
# ifdef IN_RING3
    RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
    pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
#  endif
# endif
    ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

    /* stop and decrement lockers. */
    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    ASMCompilerBarrier();
    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
    {
        /* Someone is waiting, wake up one of them. */
        SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
        int rc = SUPSemEventSignal(pSession, hEvent);
        AssertRC(rc);
    }

# ifdef IN_RING3
    /* Signal exit event. */
    if (hEventToSignal != NIL_RTSEMEVENT)
    {
        LogBird(("Signalling %#x\n", hEventToSignal));
        int rc = RTSemEventSignal(hEventToSignal);
        AssertRC(rc);
    }
# endif

#else /* IN_RC */
    /*
     * Try leave it.
     */
    if (pCritSect->s.Core.cLockers == 0)
    {
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return;

        /* darn, someone raced in on us. */
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    }
    ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

    /*
     * Queue the request.
     */
    PVM      pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU   pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    uint32_t i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
    pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
#endif /* IN_RC */
}

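/*
 * Illustrative sketch, not part of the original file: enter/leave nest, so a
 * subroutine may enter a section its caller already owns, as long as every
 * enter is balanced by exactly one leave. Both function names are made-up
 * placeholders.
 */
#if 0
static void myDevInner(PPDMCRITSECT pCritSect)
{
    PDMCritSectEnter(pCritSect, VERR_SEM_BUSY); /* nested: cNestings goes to 2 */
    /* ... */
    PDMCritSectLeave(pCritSect);                /* nested leave, still owned */
}

static void myDevOuter(PPDMCRITSECT pCritSect)
{
    PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    myDevInner(pCritSect);
    PDMCritSectLeave(pCritSect);                /* final leave releases it */
}
#endif
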
#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */

/**
 * Checks if the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM    pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}

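/*
 * Illustrative sketch, not part of the original file: ownership checks are
 * typically used in assertions guarding code that must only run with the
 * section held. The helper name is a made-up placeholder.
 */
#if 0
static void myDevUpdateSharedState(PPDMCRITSECT pCritSect)
{
    Assert(PDMCritSectIsOwner(pCritSect));
    /* ... safe to touch the guarded state here ... */
}
#endif
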
/**
 * Checks if the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The virtual CPU ID.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCPUs);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}

/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}