Rapicorn — Experimental UI Toolkit — Source Code
Version 13.07.0
00001 // Licensed GNU LGPL v3 or later: http://www.gnu.org/licenses/lgpl.html 00002 #ifndef __RAPICORN_THREAD_HH__ 00003 #define __RAPICORN_THREAD_HH__ 00004 00005 #include <rcore/utilities.hh> 00006 #include <rcore/threadlib.hh> 00007 #include <thread> 00008 #include <list> 00009 00010 namespace Rapicorn { 00011 00012 struct RECURSIVE_LOCK {} constexpr RECURSIVE_LOCK {}; 00013 00019 class Mutex { 00020 pthread_mutex_t mutex_; 00021 public: 00022 constexpr Mutex () : mutex_ (PTHREAD_MUTEX_INITIALIZER) {} 00023 constexpr Mutex (struct RECURSIVE_LOCK) : mutex_ (PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP) {} 00024 void lock () { pthread_mutex_lock (&mutex_); } 00025 void unlock () { pthread_mutex_unlock (&mutex_); } 00026 bool try_lock () { return 0 == pthread_mutex_trylock (&mutex_); } 00027 bool debug_locked(); 00028 typedef pthread_mutex_t* native_handle_type; 00029 native_handle_type native_handle() { return &mutex_; } 00030 /*ctor*/ Mutex (const Mutex&) = delete; 00031 Mutex& operator= (const Mutex&) = delete; 00032 }; 00033 00039 class Spinlock { 00040 pthread_spinlock_t spinlock_; 00041 public: 00042 constexpr Spinlock () : spinlock_ RAPICORN_SPINLOCK_INITIALIZER {} 00043 void lock () { pthread_spin_lock (&spinlock_); } 00044 void unlock () { pthread_spin_unlock (&spinlock_); } 00045 bool try_lock () { return 0 == pthread_spin_trylock (&spinlock_); } 00046 typedef pthread_spinlock_t* native_handle_type; 00047 native_handle_type native_handle() { return &spinlock_; } 00048 /*ctor*/ Spinlock (const Spinlock&) = delete; 00049 Mutex& operator= (const Spinlock&) = delete; 00050 }; 00051 00057 class RWLock { 00058 pthread_rwlock_t rwlock_; 00059 char initialized_; 00060 void real_init (); 00061 inline void fixinit () { if (RAPICORN_UNLIKELY (!Lib::atomic_load (&initialized_))) real_init(); } 00062 public: 00063 constexpr RWLock () : rwlock_ (), initialized_ (0) {} 00064 void rdlock () { fixinit(); while (pthread_rwlock_rdlock (&rwlock_) == EAGAIN); } 00065 void wrlock () 
{ fixinit(); pthread_rwlock_wrlock (&rwlock_); } 00066 void unlock () { fixinit(); pthread_rwlock_unlock (&rwlock_); } 00067 bool try_rdlock () { fixinit(); return 0 == pthread_rwlock_tryrdlock (&rwlock_); } 00068 bool try_wrlock () { fixinit(); return 0 == pthread_rwlock_trywrlock (&rwlock_); } 00069 typedef pthread_rwlock_t* native_handle_type; 00070 native_handle_type native_handle() { return &rwlock_; } 00071 /*dtor*/ ~RWLock () { fixinit(); pthread_rwlock_destroy (&rwlock_); } 00072 /*ctor*/ RWLock (const RWLock&) = delete; 00073 Mutex& operator= (const RWLock&) = delete; 00074 }; 00075 00077 struct ThreadInfo { 00079 typedef std::vector<void*> VoidPointers; 00080 void *volatile hp[8]; 00081 static VoidPointers collect_hazards (); 00082 static inline bool lookup_pointer (const std::vector<void*> &ptrs, void *arg); 00083 00084 String ident (); 00085 String name (); 00086 void name (const String &newname); 00087 static inline ThreadInfo& self (); 00088 00091 template<typename T> inline T get_data (DataKey<T> *key) { tdl(); T d = data_list_.get (key); tdu(); return d; } 00092 template<typename T> inline void set_data (DataKey<T> *key, T data) { tdl(); data_list_.set (key, data); tdu(); } 00093 template<typename T> inline void delete_data (DataKey<T> *key) { tdl(); data_list_.del (key); tdu(); } 00094 template<typename T> inline T swap_data (DataKey<T> *key) { tdl(); T d = data_list_.swap (key); tdu(); return d; } 00095 template<typename T> inline T swap_data (DataKey<T> *key, T data) { tdl(); T d = data_list_.swap (key, data); tdu(); return d; } 00096 private: 00097 ThreadInfo *volatile next; 00098 pthread_t pth_thread_id; 00099 char pad[RAPICORN_CACHE_LINE_ALIGNMENT - sizeof hp - sizeof next - sizeof pth_thread_id]; 00100 String name_; 00101 Mutex data_mutex_; 00102 DataList data_list_; 00103 static ThreadInfo __thread *self_cached; 00104 /*ctor*/ ThreadInfo (); 00105 /*ctor*/ ThreadInfo (const ThreadInfo&) = delete; 00106 /*dtor*/ ~ThreadInfo (); 00107 
ThreadInfo& operator= (const ThreadInfo&) = delete; 00108 static void destroy_specific(void *vdata); 00109 void reset_specific (); 00110 void setup_specific (); 00111 static ThreadInfo* create (); 00112 void tdl () { data_mutex_.lock(); } 00113 void tdu () { data_mutex_.unlock(); } 00114 }; 00115 00116 struct AUTOMATIC_LOCK {} constexpr AUTOMATIC_LOCK {}; 00117 struct BALANCED_LOCK {} constexpr BALANCED_LOCK {}; 00118 00132 template<class MUTEX> 00133 class ScopedLock { 00134 MUTEX &mutex_; 00135 volatile int count_; 00136 RAPICORN_CLASS_NON_COPYABLE (ScopedLock); 00137 public: 00138 inline ~ScopedLock () { while (count_ < 0) lock(); while (count_ > 0) unlock(); } 00139 inline void lock () { mutex_.lock(); count_++; } 00140 inline void unlock () { count_--; mutex_.unlock(); } 00141 inline ScopedLock (MUTEX &mutex, struct AUTOMATIC_LOCK = AUTOMATIC_LOCK) : mutex_ (mutex), count_ (0) { lock(); } 00142 inline ScopedLock (MUTEX &mutex, struct BALANCED_LOCK) : mutex_ (mutex), count_ (0) {} 00143 }; 00144 00149 class Cond { 00150 pthread_cond_t cond_; 00151 static struct timespec abstime (int64); 00152 /*ctor*/ Cond (const Cond&) = delete; 00153 Cond& operator= (const Cond&) = delete; 00154 public: 00155 constexpr Cond () : cond_ (PTHREAD_COND_INITIALIZER) {} 00156 /*dtor*/ ~Cond () { pthread_cond_destroy (&cond_); } 00157 void signal () { pthread_cond_signal (&cond_); } 00158 void broadcast () { pthread_cond_broadcast (&cond_); } 00159 void wait (Mutex &m) { pthread_cond_wait (&cond_, m.native_handle()); } 00160 void wait_timed (Mutex &m, int64 max_usecs) 00161 { struct timespec abs = abstime (max_usecs); pthread_cond_timedwait (&cond_, m.native_handle(), &abs); } 00162 typedef pthread_cond_t* native_handle_type; 00163 native_handle_type native_handle() { return &cond_; } 00164 }; 00165 00167 namespace ThisThread { 00168 00169 String name (); 00170 int online_cpus (); 00171 int affinity (); 00172 void affinity (int cpu); 00173 int thread_pid (); 00174 int process_pid 
(); 00175 00176 #ifdef DOXYGEN // parts reused from std::this_thread 00177 00178 void yield (); 00180 std::thread::id get_id (); 00182 template<class Rep, class Period> void sleep_for (std::chrono::duration<Rep,Period> sleep_duration); 00184 template<class Clock, class Duration> void sleep_until (const std::chrono::time_point<Clock,Duration> &sleep_time); 00185 #else // !DOXYGEN 00186 using namespace std::this_thread; 00187 #endif // !DOXYGEN 00188 00189 } // ThisThread 00190 00191 #ifdef RAPICORN_CONVENIENCE 00192 00196 #define do_once RAPICORN_DO_ONCE 00197 00198 #endif // RAPICORN_CONVENIENCE 00199 00202 template<typename T> class Atomic; 00203 00205 template<> struct Atomic<char> : Lib::Atomic<char> { 00206 constexpr Atomic<char> (char i = 0) : Lib::Atomic<char> (i) {} 00207 using Lib::Atomic<char>::operator=; 00208 }; 00209 00211 template<> struct Atomic<int8> : Lib::Atomic<int8> { 00212 constexpr Atomic<int8> (int8 i = 0) : Lib::Atomic<int8> (i) {} 00213 using Lib::Atomic<int8>::operator=; 00214 }; 00215 00217 template<> struct Atomic<uint8> : Lib::Atomic<uint8> { 00218 constexpr Atomic<uint8> (uint8 i = 0) : Lib::Atomic<uint8> (i) {} 00219 using Lib::Atomic<uint8>::operator=; 00220 }; 00221 00223 template<> struct Atomic<int32> : Lib::Atomic<int32> { 00224 constexpr Atomic<int32> (int32 i = 0) : Lib::Atomic<int32> (i) {} 00225 using Lib::Atomic<int32>::operator=; 00226 }; 00227 00229 template<> struct Atomic<uint32> : Lib::Atomic<uint32> { 00230 constexpr Atomic<uint32> (uint32 i = 0) : Lib::Atomic<uint32> (i) {} 00231 using Lib::Atomic<uint32>::operator=; 00232 }; 00233 00235 template<> struct Atomic<int64> : Lib::Atomic<int64> { 00236 constexpr Atomic<int64> (int64 i = 0) : Lib::Atomic<int64> (i) {} 00237 using Lib::Atomic<int64>::operator=; 00238 }; 00239 00241 template<> struct Atomic<uint64> : Lib::Atomic<uint64> { 00242 constexpr Atomic<uint64> (uint64 i = 0) : Lib::Atomic<uint64> (i) {} 00243 using Lib::Atomic<uint64>::operator=; 00244 }; 00245 00247 
/// Atomic pointer specialization; adds pointer arithmetic and lock-free list linking.
template<typename V> class Atomic<V*> : protected Lib::Atomic<V*> {
  typedef Lib::Atomic<V*> A;
public:
  constexpr Atomic (V *p = nullptr) : A (p) {}
  using A::store;
  using A::load;
  using A::cas;
  using A::operator=;
  // Pointer arithmetic delegates to the base; the byte offset is smuggled
  // through a V* cast because the base operator takes the value type.
  V* operator+= (ptrdiff_t d) volatile { return A::operator+= ((V*) d); }
  V* operator-= (ptrdiff_t d) volatile { return A::operator-= ((V*) d); }
  operator V* () const volatile { return load(); }
  /// Atomically prepend @a newv: store the current head into *nextp and CAS the
  /// head to @a newv, retrying until the CAS succeeds (classic lock-free push).
  void push_link (V*volatile *nextp, V *newv) { do { *nextp = load(); } while (!cas (*nextp, newv)); }
};

/** Mutex-protected value holder: all reads and assignments are serialized.
 * The contained Type is constructed lazily (placement-new into mem_) on first
 * assignment or read.
 * NOTE(review): mem_ is a uint64 array, which guarantees only 8-byte alignment —
 * confirm Type never needs stricter alignment (e.g. over-aligned SIMD types).
 */
template<class Type>
class Exclusive {
  Mutex  mutex_;
  Type  *data_;                                 // NULL until first setup_L()
  uint64 mem_[(sizeof (Type) + 7) / 8];         // in-place storage for the Type instance
  /// Construct the contained value from @a data if not yet constructed; requires mutex_ held.
  void setup_L   (const Type &data) { if (data_) return; data_ = new (mem_) Type (data); }
  /// Assign @a data, constructing on first use; requires mutex_ held.
  void replace_L (const Type &data) { if (!data_) setup_L (data); else *data_ = data; }
public:
  constexpr Exclusive () : mutex_(), data_ (0) {}
  void operator= (const Type &data) { ScopedLock<Mutex> locker (mutex_); replace_L (data); }
  /// Read a copy of the value; a default-constructed Type is created on first read.
  operator Type  () { ScopedLock<Mutex> locker (mutex_); if (!data_) setup_L (Type()); return *data_; }
  /*dtor*/ ~Exclusive () { ScopedLock<Mutex> locker (mutex_); if (data_) data_->~Type(); }
};

// == AsyncBlockingQueue ==
/// Inter-thread FIFO; pop() blocks on a condition variable until data arrives.
template<class Value>
class AsyncBlockingQueue {
  Mutex            mutex_;
  Cond             cond_;
  std::list<Value> list_;
public:
  void  push (const Value &v);          ///< Enqueue; wakes blocked poppers on empty->non-empty.
  Value pop  ();                        ///< Dequeue the front element, blocking while empty.
  bool  pending ();                     ///< True if elements are queued (racy by nature).
  void  swap (std::list<Value> &list);  ///< Exchange queue contents with @a list atomically.
};

// == AsyncNotifyingQueue ==
/// Inter-thread FIFO; instead of blocking, an empty->non-empty transition
/// invokes a user-provided notifier callback (pop() returns a fallback when empty).
template<class Value>
class AsyncNotifyingQueue {
  Mutex                 mutex_;
  std::function<void()> notifier_;      // invoked (under mutex_) on empty->non-empty push/swap
  std::list<Value>      list_;
public:
  void  push (const Value &v);                          ///< Enqueue; may invoke the notifier.
  Value pop  (Value fallback = 0);                      ///< Dequeue, or return @a fallback if empty.
  bool  pending ();                                     ///< True if elements are queued.
  void  swap (std::list<Value> &list);                  ///< Exchange contents; may invoke the notifier.
  void  notifier (const std::function<void()> &notifier); ///< Install the wakeup callback.
};

// == AsyncRingBuffer ==
/** Lock-free ring buffer of fixed capacity.
 * Reader and writer synchronize only through the atomic rmark_/wmark_ indices
 * plus explicit memory fences; presumably intended for exactly one reader
 * thread and one writer thread — NOTE(review): inferred from the fence-based
 * design, confirm against callers. One slot is always kept unused to
 * distinguish full from empty, hence size_ = buffer_size + 1.
 */
template<typename T>
class AsyncRingBuffer {
  const uint   size_;                   // capacity + 1 (one slot reserved)
  Atomic<uint> wmark_, rmark_;          // write index / read index into buffer_
  T           *buffer_;
  RAPICORN_CLASS_NON_COPYABLE (AsyncRingBuffer);
public:
  explicit AsyncRingBuffer (uint buffer_size);  ///< Create a buffer holding @a buffer_size elements.
  /*dtor*/ ~AsyncRingBuffer ();
  uint n_readable () const;                     ///< Elements currently available to read.
  uint n_writable () const;                     ///< Free slots currently available to write.
  uint read  (uint length, T *data, bool partial = true);       ///< Returns elements actually read.
  uint write (uint length, const T *data, bool partial = true); ///< Returns elements actually written.
};

// == Implementation Bits ==
template<typename T>
AsyncRingBuffer<T>::AsyncRingBuffer (uint buffer_size) :
  size_ (buffer_size + 1), wmark_ (0), rmark_ (0), buffer_ (new T[size_])
{}

template<typename T>
AsyncRingBuffer<T>::~AsyncRingBuffer()
{
  // Poison all state before freeing so stray concurrent use fails loudly
  // rather than touching freed memory.
  T *old = buffer_;
  buffer_ = NULL;
  rmark_ = 0;
  wmark_ = 0;
  delete[] old;
  // NOTE(review): writing through const_cast to the const member size_ is
  // formally undefined behaviour; kept as deliberate use-after-destroy poisoning.
  *const_cast<uint*> (&size_) = 0;
}

template<typename T> uint
AsyncRingBuffer<T>::n_writable() const
{
  const uint rm = rmark_.load();
  const uint wm = wmark_.load();
  // Free slots, modulo wrap-around; one slot stays reserved (hence size_ - 1).
  const uint space = (size_ - 1 + rm - wm) % size_;
  return space;
}

template<typename T> uint
AsyncRingBuffer<T>::write (uint length, const T *data, bool partial)
{
  const uint orig_length = length;
  const uint rm = rmark_.load();
  uint wm = wmark_.load();
  uint space = (size_ - 1 + rm - wm) % size_;
  if (!partial && length > space)
    return 0;   // all-or-nothing mode: refuse rather than split the write
  while (length)
    {
      // Contiguous free span from wm up to either the buffer end or the reader.
      if (rm <= wm)
        space = size_ - wm + (rm == 0 ? -1 : 0);        // keep one reserved slot at wrap
      else
        space = rm - wm -1;
      if (!space)
        break;
      space = MIN (space, length);
      std::copy (data, &data[space], &buffer_[wm]);
      wm = (wm + space) % size_;
      data += space;
      length -= space;
    }
  RAPICORN_SFENCE; // wmb ensures buffer_ writes are made visible before the wmark_ update
  wmark_.store (wm);
  return orig_length - length;
}

template<typename T> uint
AsyncRingBuffer<T>::n_readable() const
{
  const uint wm = wmark_.load();
  const uint rm = rmark_.load();
  // Occupied slots, modulo wrap-around.
  const uint space = (size_ + wm - rm) % size_;
  return space;
}

template<typename T> uint
AsyncRingBuffer<T>::read (uint length, T *data, bool partial)
{
  const uint orig_length = length;
  RAPICORN_LFENCE; // rmb ensures buffer_ contents are seen before wmark_ updates
  const uint wm = wmark_.load();
  uint rm = rmark_.load();
  uint space = (size_ + wm - rm) % size_;
  if (!partial && length > space)
    return 0;   // all-or-nothing mode: refuse rather than split the read
  while (length)
    {
      // Contiguous readable span from rm up to either the buffer end or the writer.
      if (wm < rm)
        space = size_ - rm;
      else
        space = wm - rm;
      if (!space)
        break;
      space = MIN (space, length);
      std::copy (&buffer_[rm], &buffer_[rm + space], data);
      rm = (rm + space) % size_;
      data += space;
      length -= space;
    }
  rmark_.store (rm);
  return orig_length - length;
}

/// Enqueue @a v; poppers are woken only on the empty->non-empty transition.
template<class Value> void
AsyncBlockingQueue<Value>::push (const Value &v)
{
  ScopedLock<Mutex> sl (mutex_);
  const bool notify = list_.empty();
  list_.push_back (v);
  if (RAPICORN_UNLIKELY (notify))
    cond_.broadcast();
}

/// Dequeue the front element, blocking on the condition variable while empty.
template<class Value> Value
AsyncBlockingQueue<Value>::pop ()
{
  ScopedLock<Mutex> sl (mutex_);
  while (list_.empty())         // loop guards against spurious wakeups
    cond_.wait (mutex_);
  Value v = list_.front();
  list_.pop_front();
  return v;
}
00449 template<class Value> bool 00450 AsyncBlockingQueue<Value>::pending() 00451 { 00452 ScopedLock<Mutex> sl (mutex_); 00453 return !list_.empty(); 00454 } 00455 00456 template<class Value> void 00457 AsyncBlockingQueue<Value>::swap (std::list<Value> &list) 00458 { 00459 ScopedLock<Mutex> sl (mutex_); 00460 const bool notify = list_.empty(); 00461 list_.swap (list); 00462 if (notify && !list_.empty()) 00463 cond_.broadcast(); 00464 } 00465 00466 template<class Value> void 00467 AsyncNotifyingQueue<Value>::push (const Value &v) 00468 { 00469 ScopedLock<Mutex> sl (mutex_); 00470 const bool notify = list_.empty(); 00471 list_.push_back (v); 00472 if (RAPICORN_UNLIKELY (notify) && notifier_) 00473 notifier_(); 00474 } 00475 00476 template<class Value> Value 00477 AsyncNotifyingQueue<Value>::pop (Value fallback) 00478 { 00479 ScopedLock<Mutex> sl (mutex_); 00480 if (RAPICORN_UNLIKELY (list_.empty())) 00481 return fallback; 00482 Value v = list_.front(); 00483 list_.pop_front(); 00484 return v; 00485 } 00486 00487 template<class Value> bool 00488 AsyncNotifyingQueue<Value>::pending() 00489 { 00490 ScopedLock<Mutex> sl (mutex_); 00491 return !list_.empty(); 00492 } 00493 00494 template<class Value> void 00495 AsyncNotifyingQueue<Value>::swap (std::list<Value> &list) 00496 { 00497 ScopedLock<Mutex> sl (mutex_); 00498 const bool notify = list_.empty(); 00499 list_.swap (list); 00500 if (notify && !list_.empty() && notifier_) 00501 notifier_(); 00502 } 00503 00504 template<class Value> void 00505 AsyncNotifyingQueue<Value>::notifier (const std::function<void()> ¬ifier) 00506 { 00507 ScopedLock<Mutex> sl (mutex_); 00508 notifier_ = notifier; 00509 } 00510 00511 inline ThreadInfo& 00512 ThreadInfo::self() 00513 { 00514 if (RAPICORN_UNLIKELY (!self_cached)) 00515 self_cached = create(); 00516 return *self_cached; 00517 } 00518 00519 inline bool 00520 ThreadInfo::lookup_pointer (const std::vector<void*> &ptrs, void *arg) 00521 { 00522 size_t n_elements = ptrs.size(), offs = 0; 
00523 while (offs < n_elements) 00524 { 00525 size_t i = (offs + n_elements) >> 1; 00526 void *current = ptrs[i]; 00527 if (arg == current) 00528 return true; // match 00529 else if (arg < current) 00530 n_elements = i; 00531 else // (arg > current) 00532 offs = i + 1; 00533 } 00534 return false; // unmatched 00535 } 00536 00537 } // Rapicorn 00538 00539 #endif // __RAPICORN_THREAD_HH__