VirtualBox

source: vbox/trunk/src/VBox/Additions/haiku/include/lock.h@87760

Last change on this file since 87760 was 82968, checked in by vboxsync, 5 years ago: copyright year updates by scm.

/* $Id: lock.h 82968 2020-02-04 10:35:17Z vboxsync $ */
/** @file
 * Lock.h - Haiku, private locking internals.
 */

/*
 * Copyright (C) 2012-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * This code is based on:
 *
 * VirtualBox Guest Additions for Haiku.
 *
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

/** @todo r=ramshankar: Eventually this file should be shipped by Haiku and
 *  should be removed from the VBox tree. */

#ifndef GA_INCLUDED_HAIKU_lock_h
#define GA_INCLUDED_HAIKU_lock_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <OS.h>


struct mutex_waiter;

typedef struct mutex {
    const char*             name;
    struct mutex_waiter*    waiters;
#if KDEBUG
    thread_id               holder;
#else
    int32                   count;
    uint16                  ignore_unlock_count;
#endif
    uint8                   flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME   0x1

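/* How the !KDEBUG fast path encodes state in "count" (as used by the inline
 * mutex_lock()/mutex_unlock()/mutex_trylock() below): 0 means unlocked,
 * -1 means locked with no waiters, and -1 - n means locked with n waiters.
 * Lockers atomically decrement the count and enter the slow path only when
 * it was already negative. */
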
typedef struct recursive_lock {
    mutex                   lock;
#if !KDEBUG
    thread_id               holder;
#endif
    int                     recursion;
} recursive_lock;

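/* Usage sketch (illustrative only, kept compiled out; uses the API declared
 * further below): a recursive lock lets the holding thread re-enter, the
 * nesting depth is tracked in "recursion", and every lock needs a matching
 * unlock. */
#if 0
static recursive_lock sCacheLock;

static void
cache_init(void)
{
    recursive_lock_init(&sCacheLock, "cache lock");
}

static void
flush_cache(void)
{
    recursive_lock_lock(&sCacheLock);
    /* ... may call helpers that take sCacheLock again ... */
    recursive_lock_unlock(&sCacheLock);
}
#endif
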
struct rw_lock_waiter;

typedef struct rw_lock {
    const char*             name;
    struct rw_lock_waiter*  waiters;
    thread_id               holder;
    vint32                  count;
    int32                   owner_count;
    int16                   active_readers;
                            // Only > 0 while a writer is waiting: number
                            // of active readers when the first waiting
                            // writer started waiting.
    int16                   pending_readers;
                            // Number of readers that have already
                            // incremented "count", but have not yet started
                            // to wait at the time the last writer unlocked.
    uint32                  flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE   0x10000

#define RW_LOCK_FLAG_CLONE_NAME     0x1

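/* How readers and writers share "count" (as used by the inline read-lock
 * fast paths below): each reader increments "count" by 1, and the read fast
 * path succeeds only while the old value is below RW_LOCK_WRITER_COUNT_BASE,
 * i.e. while no writer has raised the count by that amount; otherwise the
 * reader falls back to the slow path and blocks. */
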
#if KDEBUG
# define KDEBUG_RW_LOCK_DEBUG 0
    // Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
    // The rw_lock will just behave like a recursive locker then.
# define ASSERT_LOCKED_RECURSIVE(r) \
    { ASSERT(find_thread(NULL) == (r)->lock.holder); }
# define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
# define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
    { ASSERT(find_thread(NULL) == (l)->holder); }
# if KDEBUG_RW_LOCK_DEBUG
#  define ASSERT_READ_LOCKED_RW_LOCK(l) \
    { ASSERT(find_thread(NULL) == (l)->holder); }
# else
#  define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
# endif
#else
# define ASSERT_LOCKED_RECURSIVE(r) do {} while (false)
# define ASSERT_LOCKED_MUTEX(m) do {} while (false)
# define ASSERT_WRITE_LOCKED_RW_LOCK(m) do {} while (false)
# define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#endif

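/* Usage sketch (illustrative only, kept compiled out; "struct queue" and its
 * members are hypothetical): document a locking contract by asserting it in
 * the function that relies on it. The check is compiled away in non-KDEBUG
 * builds. */
#if 0
static void
queue_remove_first_locked(struct queue* q)
{
    ASSERT_LOCKED_MUTEX(&q->lock);  /* caller must hold q->lock */
    /* ... unlink the first element ... */
}
#endif
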

// static initializers
#if KDEBUG
# define MUTEX_INITIALIZER(name)            { name, NULL, -1, 0 }
# define RECURSIVE_LOCK_INITIALIZER(name)   { MUTEX_INITIALIZER(name), 0 }
#else
# define MUTEX_INITIALIZER(name)            { name, NULL, 0, 0, 0 }
# define RECURSIVE_LOCK_INITIALIZER(name)   { MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name)           { name, NULL, -1, 0, 0, 0 }

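/* Usage sketch (illustrative only, kept compiled out): the initializers make
 * locks with static storage duration usable without an explicit *_init()
 * call. The name is *not* cloned, so the string must outlive the lock. */
#if 0
static mutex sLogLock = MUTEX_INITIALIZER("log lock");
static rw_lock sConfigLock = RW_LOCK_INITIALIZER("config lock");
#endif
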
#if KDEBUG
# define RECURSIVE_LOCK_HOLDER(recursiveLock)   ((recursiveLock)->lock.holder)
#else
# define RECURSIVE_LOCK_HOLDER(recursiveLock)   ((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
    // name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
    uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);

extern void rw_lock_init(rw_lock* lock, const char* name);
    // name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);

extern void mutex_init(mutex* lock, const char* name);
    // name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
    // Unlocks "from" and locks "to" such that unlocking and starting to
    // wait for the lock happen atomically. I.e. if "from" guards the object
    // "to" belongs to, the operation is safe as long as "from" is held
    // while destroying "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
    // Like mutex_switch_lock(), just for switching from a read-locked
    // rw_lock.

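/* Usage sketch (illustrative only, kept compiled out; "struct object",
 * "sObjectListLock" and "find_object_locked" are hypothetical): the atomic
 * hand-over of mutex_switch_lock() means no other thread can slip in between
 * releasing the list lock and blocking on the object lock, so the object
 * cannot be destroyed while we are still on our way to waiting for it. */
#if 0
static mutex sObjectListLock = MUTEX_INITIALIZER("object list");

static status_t
lock_object(int32 id, struct object** _object)
{
    struct object* object;

    mutex_lock(&sObjectListLock);
    object = find_object_locked(id);    /* list guarded by sObjectListLock */
    if (object == NULL) {
        mutex_unlock(&sObjectListLock);
        return B_ENTRY_NOT_FOUND;
    }

    *_object = object;
    /* drop the list lock and start waiting for the object lock atomically */
    return mutex_switch_lock(&sObjectListLock, &object->lock);
}
#endif
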
// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
    uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);

extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
extern void _mutex_unlock(mutex* lock, bool threadsLocked);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
    bigtime_t timeout);

static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
    return rw_lock_write_lock(lock);
#else
    int32 oldCount = atomic_add(&lock->count, 1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        return _rw_lock_read_lock(lock);
    return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
    bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
    // In this debug mode the rw_lock degenerates to an exclusive lock, and
    // since no timed write-lock variant is declared in this header, the
    // timeout cannot be honored here.
    (void)timeoutFlags;
    (void)timeout;
    return rw_lock_write_lock(lock);
#else
    int32 oldCount = atomic_add(&lock->count, 1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
    return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
    // Read locks are write locks in this debug mode; unlock accordingly.
    // _rw_lock_write_unlock() is called directly because
    // rw_lock_write_unlock() is not defined until below.
    _rw_lock_write_unlock(lock, false);
#else
    int32 oldCount = atomic_add(&lock->count, -1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        _rw_lock_read_unlock(lock, false);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
    _rw_lock_write_unlock(lock, false);
}

static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
    return _mutex_lock(lock, false);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock(lock, false);
    return B_OK;
#endif
}


static inline status_t
mutex_lock_threads_locked(mutex* lock)
{
#if KDEBUG
    return _mutex_lock(lock, true);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock(lock, true);
    return B_OK;
#endif
}


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
    return _mutex_trylock(lock);
#else
    if (atomic_test_and_set(&lock->count, -1, 0) != 0)
        return B_WOULD_BLOCK;
    return B_OK;
#endif
}

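/* Usage sketch (illustrative only, kept compiled out): take the lock only if
 * it is uncontended and fall back to other work otherwise; B_WOULD_BLOCK
 * signals that another thread holds the lock. */
#if 0
static bool
try_collect_garbage(mutex* poolLock)
{
    if (mutex_trylock(poolLock) != B_OK)
        return false;   /* another thread is working on the pool */
    /* ... sweep the pool ... */
    mutex_unlock(poolLock);
    return true;
}
#endif
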

static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
    return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
    return B_OK;
#endif
}

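/* Usage sketch (illustrative only, kept compiled out): bound the wait with a
 * relative timeout; B_RELATIVE_TIMEOUT and bigtime_t (microseconds) come
 * from OS.h. A timed-out lock attempt must not be paired with an unlock. */
#if 0
static status_t
append_event(mutex* queueLock)
{
    status_t error = mutex_lock_with_timeout(queueLock, B_RELATIVE_TIMEOUT,
        1000000LL);     /* give up after one second */
    if (error != B_OK)
        return error;   /* e.g. B_TIMED_OUT; the lock was not acquired */
    /* ... append to the event queue ... */
    mutex_unlock(queueLock);
    return B_OK;
}
#endif
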
static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
    if (atomic_add(&lock->count, 1) < -1)
#endif
        _mutex_unlock(lock, false);
}


static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
    lock->holder = thread;
#else
    // Nothing to do: without KDEBUG the holder is not tracked.
    (void)lock;
    (void)thread;
#endif
}


extern void lock_debug_init(void);

#ifdef __cplusplus
}
#endif

#endif /* !GA_INCLUDED_HAIKU_lock_h */