Mercurial > hg > sv-dependency-builds
comparison osx/include/kj/mutex.h @ 49:3ab5a40c4e3b
Add Capnp and KJ builds for OSX
| author   | Chris Cannam <cannam@all-day-breakfast.com> |
|----------|---------------------------------------------|
| date     | Tue, 25 Oct 2016 14:48:23 +0100             |
| parents  | (none)                                      |
| children | 0994c39f1e94                                |
comparison
equal
deleted
inserted
replaced
48:9530b331f8c1 | 49:3ab5a40c4e3b |
---|---|
1 // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors | |
2 // Licensed under the MIT License: | |
3 // | |
4 // Permission is hereby granted, free of charge, to any person obtaining a copy | |
5 // of this software and associated documentation files (the "Software"), to deal | |
6 // in the Software without restriction, including without limitation the rights | |
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
8 // copies of the Software, and to permit persons to whom the Software is | |
9 // furnished to do so, subject to the following conditions: | |
10 // | |
11 // The above copyright notice and this permission notice shall be included in | |
12 // all copies or substantial portions of the Software. | |
13 // | |
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
20 // THE SOFTWARE. | |
21 | |
22 #ifndef KJ_MUTEX_H_ | |
23 #define KJ_MUTEX_H_ | |
24 | |
25 #if defined(__GNUC__) && !KJ_HEADER_WARNINGS | |
26 #pragma GCC system_header | |
27 #endif | |
28 | |
29 #include "memory.h" | |
30 #include <inttypes.h> | |
31 | |
32 #if __linux__ && !defined(KJ_USE_FUTEX) | |
33 #define KJ_USE_FUTEX 1 | |
34 #endif | |
35 | |
36 #if !KJ_USE_FUTEX && !_WIN32 | |
37 // On Linux we use futex. On other platforms we wrap pthreads. | |
38 // TODO(someday): Write efficient low-level locking primitives for other platforms. | |
39 #include <pthread.h> | |
40 #endif | |
41 | |
42 namespace kj { | |
43 | |
44 // ======================================================================================= | |
45 // Private details -- public interfaces follow below. | |
46 | |
47 namespace _ { // private | |
48 | |
49 class Mutex { | |
50 // Internal implementation details. See `MutexGuarded<T>`. | |
51 | |
52 public: | |
53 Mutex(); | |
54 ~Mutex(); | |
55 KJ_DISALLOW_COPY(Mutex); | |
56 | |
57 enum Exclusivity { | |
58 EXCLUSIVE, | |
59 SHARED | |
60 }; | |
61 | |
62 void lock(Exclusivity exclusivity); | |
63 void unlock(Exclusivity exclusivity); | |
64 | |
65 void assertLockedByCaller(Exclusivity exclusivity); | |
66 // In debug mode, assert that the mutex is locked by the calling thread, or if that is | |
67 // non-trivial, assert that the mutex is locked (which should be good enough to catch problems | |
68 // in unit tests). In non-debug builds, do nothing. | |
69 | |
70 private: | |
71 #if KJ_USE_FUTEX | |
72 uint futex; | |
73 // bit 31 (msb) = set if exclusive lock held | |
74 // bit 30 (msb) = set if threads are waiting for exclusive lock | |
75 // bits 0-29 = count of readers; If an exclusive lock is held, this is the count of threads | |
76 // waiting for a read lock, otherwise it is the count of threads that currently hold a read | |
77 // lock. | |
78 | |
79 static constexpr uint EXCLUSIVE_HELD = 1u << 31; | |
80 static constexpr uint EXCLUSIVE_REQUESTED = 1u << 30; | |
81 static constexpr uint SHARED_COUNT_MASK = EXCLUSIVE_REQUESTED - 1; | |
82 | |
83 #elif _WIN32 | |
84 uintptr_t srwLock; // Actually an SRWLOCK, but don't want to #include <windows.h> in header. | |
85 | |
86 #else | |
87 mutable pthread_rwlock_t mutex; | |
88 #endif | |
89 }; | |
90 | |
91 class Once { | |
92 // Internal implementation details. See `Lazy<T>`. | |
93 | |
94 public: | |
95 #if KJ_USE_FUTEX | |
96 inline Once(bool startInitialized = false) | |
97 : futex(startInitialized ? INITIALIZED : UNINITIALIZED) {} | |
98 #else | |
99 Once(bool startInitialized = false); | |
100 ~Once(); | |
101 #endif | |
102 KJ_DISALLOW_COPY(Once); | |
103 | |
104 class Initializer { | |
105 public: | |
106 virtual void run() = 0; | |
107 }; | |
108 | |
109 void runOnce(Initializer& init); | |
110 | |
111 #if _WIN32 // TODO(perf): Can we make this inline on win32 somehow? | |
112 bool isInitialized() noexcept; | |
113 | |
114 #else | |
115 inline bool isInitialized() noexcept { | |
116 // Fast path check to see if runOnce() would simply return immediately. | |
117 #if KJ_USE_FUTEX | |
118 return __atomic_load_n(&futex, __ATOMIC_ACQUIRE) == INITIALIZED; | |
119 #else | |
120 return __atomic_load_n(&state, __ATOMIC_ACQUIRE) == INITIALIZED; | |
121 #endif | |
122 } | |
123 #endif | |
124 | |
125 void reset(); | |
126 // Returns the state from initialized to uninitialized. It is an error to call this when | |
127 // not already initialized, or when runOnce() or isInitialized() might be called concurrently in | |
128 // another thread. | |
129 | |
130 private: | |
131 #if KJ_USE_FUTEX | |
132 uint futex; | |
133 | |
134 enum State { | |
135 UNINITIALIZED, | |
136 INITIALIZING, | |
137 INITIALIZING_WITH_WAITERS, | |
138 INITIALIZED | |
139 }; | |
140 | |
141 #elif _WIN32 | |
142 uintptr_t initOnce; // Actually an INIT_ONCE, but don't want to #include <windows.h> in header. | |
143 | |
144 #else | |
145 enum State { | |
146 UNINITIALIZED, | |
147 INITIALIZED | |
148 }; | |
149 State state; | |
150 pthread_mutex_t mutex; | |
151 #endif | |
152 }; | |
153 | |
154 } // namespace _ (private) | |
155 | |
156 // ======================================================================================= | |
157 // Public interface | |
158 | |
159 template <typename T> | |
160 class Locked { | |
161 // Return type for `MutexGuarded<T>::lock()`. `Locked<T>` provides access to the guarded object | |
162 // and unlocks the mutex when it goes out of scope. | |
163 | |
164 public: | |
165 KJ_DISALLOW_COPY(Locked); | |
166 inline Locked(): mutex(nullptr), ptr(nullptr) {} | |
167 inline Locked(Locked&& other): mutex(other.mutex), ptr(other.ptr) { | |
168 other.mutex = nullptr; | |
169 other.ptr = nullptr; | |
170 } | |
171 inline ~Locked() { | |
172 if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); | |
173 } | |
174 | |
175 inline Locked& operator=(Locked&& other) { | |
176 if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); | |
177 mutex = other.mutex; | |
178 ptr = other.ptr; | |
179 other.mutex = nullptr; | |
180 other.ptr = nullptr; | |
181 return *this; | |
182 } | |
183 | |
184 inline void release() { | |
185 if (mutex != nullptr) mutex->unlock(isConst<T>() ? _::Mutex::SHARED : _::Mutex::EXCLUSIVE); | |
186 mutex = nullptr; | |
187 ptr = nullptr; | |
188 } | |
189 | |
190 inline T* operator->() { return ptr; } | |
191 inline const T* operator->() const { return ptr; } | |
192 inline T& operator*() { return *ptr; } | |
193 inline const T& operator*() const { return *ptr; } | |
194 inline T* get() { return ptr; } | |
195 inline const T* get() const { return ptr; } | |
196 inline operator T*() { return ptr; } | |
197 inline operator const T*() const { return ptr; } | |
198 | |
199 private: | |
200 _::Mutex* mutex; | |
201 T* ptr; | |
202 | |
203 inline Locked(_::Mutex& mutex, T& value): mutex(&mutex), ptr(&value) {} | |
204 | |
205 template <typename U> | |
206 friend class MutexGuarded; | |
207 }; | |
208 | |
209 template <typename T> | |
210 class MutexGuarded { | |
211 // An object of type T, guarded by a mutex. In order to access the object, you must lock it. | |
212 // | |
213 // Write locks are not "recursive" -- trying to lock again in a thread that already holds a lock | |
214 // will deadlock. Recursive write locks are usually a sign of bad design. | |
215 // | |
216 // Unfortunately, **READ LOCKS ARE NOT RECURSIVE** either. Common sense says they should be. | |
217 // But on many operating systems (BSD, OSX), recursively read-locking a pthread_rwlock is | |
218 // actually unsafe. The problem is that writers are "prioritized" over readers, so a read lock | |
219 // request will block if any write lock requests are outstanding. So, if thread A takes a read | |
220 // lock, thread B requests a write lock (and starts waiting), and then thread A tries to take | |
221 // another read lock recursively, the result is deadlock. | |
222 | |
223 public: | |
224 template <typename... Params> | |
225 explicit MutexGuarded(Params&&... params); | |
226 // Initialize the mutex-guarded object by passing the given parameters to its constructor. | |
227 | |
228 Locked<T> lockExclusive() const; | |
229 // Exclusively locks the object and returns it. The returned `Locked<T>` can be passed by | |
230 // move, similar to `Own<T>`. | |
231 // | |
232 // This method is declared `const` in accordance with KJ style rules which say that constness | |
233 // should be used to indicate thread-safety. It is safe to share a const pointer between threads, | |
234 // but it is not safe to share a mutable pointer. Since the whole point of MutexGuarded is to | |
235 // be shared between threads, its methods should be const, even though locking it produces a | |
236 // non-const pointer to the contained object. | |
237 | |
238 Locked<const T> lockShared() const; | |
239 // Lock the value for shared access. Multiple shared locks can be taken concurrently, but cannot | |
240 // be held at the same time as a non-shared lock. | |
241 | |
242 inline const T& getWithoutLock() const { return value; } | |
243 inline T& getWithoutLock() { return value; } | |
244 // Escape hatch for cases where some external factor guarantees that it's safe to get the | |
245 // value. You should treat these like const_cast -- be highly suspicious of any use. | |
246 | |
247 inline const T& getAlreadyLockedShared() const; | |
248 inline T& getAlreadyLockedShared(); | |
249 inline T& getAlreadyLockedExclusive() const; | |
250 // Like `getWithoutLock()`, but asserts that the lock is already held by the calling thread. | |
251 | |
252 private: | |
253 mutable _::Mutex mutex; | |
254 mutable T value; | |
255 }; | |
256 | |
257 template <typename T> | |
258 class MutexGuarded<const T> { | |
259 // MutexGuarded cannot guard a const type. This would be pointless anyway, and would complicate | |
260 // the implementation of Locked<T>, which uses constness to decide what kind of lock it holds. | |
261 static_assert(sizeof(T) < 0, "MutexGuarded's type cannot be const."); | |
262 }; | |
263 | |
264 template <typename T> | |
265 class Lazy { | |
266 // A lazily-initialized value. | |
267 | |
268 public: | |
269 template <typename Func> | |
270 T& get(Func&& init); | |
271 template <typename Func> | |
272 const T& get(Func&& init) const; | |
273 // The first thread to call get() will invoke the given init function to construct the value. | |
274 // Other threads will block until construction completes, then return the same value. | |
275 // | |
276 // `init` is a functor(typically a lambda) which takes `SpaceFor<T>&` as its parameter and returns | |
277 // `Own<T>`. If `init` throws an exception, the exception is propagated out of that thread's | |
278 // call to `get()`, and subsequent calls behave as if `get()` hadn't been called at all yet -- | |
279 // in other words, subsequent calls retry initialization until it succeeds. | |
280 | |
281 private: | |
282 mutable _::Once once; | |
283 mutable SpaceFor<T> space; | |
284 mutable Own<T> value; | |
285 | |
286 template <typename Func> | |
287 class InitImpl; | |
288 }; | |
289 | |
290 // ======================================================================================= | |
291 // Inline implementation details | |
292 | |
293 template <typename T> | |
294 template <typename... Params> | |
295 inline MutexGuarded<T>::MutexGuarded(Params&&... params) | |
296 : value(kj::fwd<Params>(params)...) {} | |
297 | |
298 template <typename T> | |
299 inline Locked<T> MutexGuarded<T>::lockExclusive() const { | |
300 mutex.lock(_::Mutex::EXCLUSIVE); | |
301 return Locked<T>(mutex, value); | |
302 } | |
303 | |
304 template <typename T> | |
305 inline Locked<const T> MutexGuarded<T>::lockShared() const { | |
306 mutex.lock(_::Mutex::SHARED); | |
307 return Locked<const T>(mutex, value); | |
308 } | |
309 | |
310 template <typename T> | |
311 inline const T& MutexGuarded<T>::getAlreadyLockedShared() const { | |
312 #ifdef KJ_DEBUG | |
313 mutex.assertLockedByCaller(_::Mutex::SHARED); | |
314 #endif | |
315 return value; | |
316 } | |
317 template <typename T> | |
318 inline T& MutexGuarded<T>::getAlreadyLockedShared() { | |
319 #ifdef KJ_DEBUG | |
320 mutex.assertLockedByCaller(_::Mutex::SHARED); | |
321 #endif | |
322 return value; | |
323 } | |
324 template <typename T> | |
325 inline T& MutexGuarded<T>::getAlreadyLockedExclusive() const { | |
326 #ifdef KJ_DEBUG | |
327 mutex.assertLockedByCaller(_::Mutex::EXCLUSIVE); | |
328 #endif | |
329 return const_cast<T&>(value); | |
330 } | |
331 | |
332 template <typename T> | |
333 template <typename Func> | |
334 class Lazy<T>::InitImpl: public _::Once::Initializer { | |
335 public: | |
336 inline InitImpl(const Lazy<T>& lazy, Func&& func): lazy(lazy), func(kj::fwd<Func>(func)) {} | |
337 | |
338 void run() override { | |
339 lazy.value = func(lazy.space); | |
340 } | |
341 | |
342 private: | |
343 const Lazy<T>& lazy; | |
344 Func func; | |
345 }; | |
346 | |
347 template <typename T> | |
348 template <typename Func> | |
349 inline T& Lazy<T>::get(Func&& init) { | |
350 if (!once.isInitialized()) { | |
351 InitImpl<Func> initImpl(*this, kj::fwd<Func>(init)); | |
352 once.runOnce(initImpl); | |
353 } | |
354 return *value; | |
355 } | |
356 | |
357 template <typename T> | |
358 template <typename Func> | |
359 inline const T& Lazy<T>::get(Func&& init) const { | |
360 if (!once.isInitialized()) { | |
361 InitImpl<Func> initImpl(*this, kj::fwd<Func>(init)); | |
362 once.runOnce(initImpl); | |
363 } | |
364 return *value; | |
365 } | |
366 | |
367 } // namespace kj | |
368 | |
369 #endif // KJ_MUTEX_H_ |