/* $Id: spinlock-generic.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
/** @file
 * IPRT - Spinlock, generic implementation.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
 * Force CPU yields after spinning the number of times indicated by this define.
 * If 0, we will spin forever. */
#define RT_CFG_SPINLOCK_GENERIC_DO_SLEEP 100000


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include <iprt/spinlock.h>
#include "internal/iprt.h"

#include <iprt/alloc.h>
#include <iprt/asm.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#include <iprt/errcore.h>
#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
# include <iprt/thread.h>
#endif

#include "internal/magics.h"


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Generic spinlock structure.
 */
typedef struct RTSPINLOCKINTERNAL
{
    /** Spinlock magic value (RTSPINLOCK_GEN_MAGIC). */
    uint32_t            u32Magic;
    /** The spinlock creation flags. */
    uint32_t            fFlags;
    /** The spinlock. */
    uint32_t volatile   fLocked;
    /** The saved CPU interrupt. */
    uint32_t volatile   fIntSaved;
} RTSPINLOCKINTERNAL, *PRTSPINLOCKINTERNAL;


/**
 * Creates a spinlock.
 *
 * @returns IPRT status code.
 * @param   pSpinlock   Where to store the spinlock handle.
 * @param   fFlags      RTSPINLOCK_FLAGS_INTERRUPT_SAFE or RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE.
 * @param   pszName     The spinlock name (not used by this implementation).
 */
RTDECL(int)  RTSpinlockCreate(PRTSPINLOCK pSpinlock, uint32_t fFlags, const char *pszName)
{
    PRTSPINLOCKINTERNAL pThis;
    AssertReturn(fFlags == RTSPINLOCK_FLAGS_INTERRUPT_SAFE || fFlags == RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE,
                 VERR_INVALID_PARAMETER);
    RT_NOREF_PV(pszName);

    /*
     * Allocate.
     */
    pThis = (PRTSPINLOCKINTERNAL)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;

    /*
     * Initialize and return.
     */
    pThis->u32Magic  = RTSPINLOCK_GEN_MAGIC;
    pThis->fFlags    = fFlags;
    pThis->fIntSaved = 0;
    ASMAtomicWriteU32(&pThis->fLocked, 0);

    *pSpinlock = pThis;
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSpinlockCreate);


/**
 * Destroys a spinlock created by RTSpinlockCreate.
 *
 * @returns IPRT status code.
 * @param   Spinlock    The spinlock to destroy.
 */
RTDECL(int)  RTSpinlockDestroy(RTSPINLOCK Spinlock)
{
    /*
     * Validate input.
     */
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    if (!pThis)
        return VERR_INVALID_PARAMETER;
    if (pThis->u32Magic != RTSPINLOCK_GEN_MAGIC)
    {
        AssertMsgFailed(("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic));
        return VERR_INVALID_PARAMETER;
    }

    /* Invalidate the magic before freeing so stale handles are caught. */
    ASMAtomicIncU32(&pThis->u32Magic);
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSpinlockDestroy);


/**
 * Acquires the spinlock, spinning (and optionally yielding) until it is free.
 *
 * @param   Spinlock    The spinlock to acquire.
 */
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /* Remember the current interrupt state so RTSpinlockRelease can restore it. */
        uint32_t fIntSaved = ASMGetFlags();
#endif

#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
            /* Disable interrupts while probing the lock; re-enable them before yielding. */
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
# endif
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                    pThis->fIntSaved = fIntSaved;
# endif
                    return;
                }
                ASMNopPause();
            }
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
# endif
            RTThreadYield();
        }
#else
        for (;;)
        {
            /* Disable interrupts around each attempt; restore them between attempts. */
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
# endif
            if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                pThis->fIntSaved = fIntSaved;
# endif
                return;
            }
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
# endif
            ASMNopPause();
        }
#endif
    }
    else
    {
#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                    return;
                ASMNopPause();
            }
            RTThreadYield();
        }
#else
        while (!ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            ASMNopPause();
#endif
    }
}
RT_EXPORT_SYMBOL(RTSpinlockAcquire);


/**
 * Releases the spinlock, restoring the saved interrupt state if applicable.
 *
 * @param   Spinlock    The spinlock to release.
 */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /* Fetch the interrupt state saved by the acquirer before dropping the lock. */
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#endif

        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else
    {
        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
    }
}
RT_EXPORT_SYMBOL(RTSpinlockRelease);
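

/*
 * Illustrative usage sketch of this spinlock API: create, acquire, release,
 * destroy. The function, handle variable and lock name below are made up for
 * the example and the block is compiled out; it is not part of the API.
 */
#if 0 /* example only */
static void exampleSpinlockUsage(void)
{
    RTSPINLOCK hSpinlock = NIL_RTSPINLOCK;
    int rc = RTSpinlockCreate(&hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "ExampleLock");
    if (RT_SUCCESS(rc))
    {
        RTSpinlockAcquire(hSpinlock);
        /* ... access the data the lock protects; keep this section short ... */
        RTSpinlockRelease(hSpinlock);

        RTSpinlockDestroy(hSpinlock);
    }
}
#endif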