Main Page   Reference Manual   Namespace List   Compound List   Namespace Members   Compound Members   File Members  

private_threading.h
Go to the documentation of this file.
1 // $Header$
2 //
3 // Copyright (C) 2001 - 2004, by
4 //
5 // Carlo Wood, Run on IRC <carlo@alinoe.com>
6 // RSA-1024 0x624ACAD5 1997-01-26 Sign & Encrypt
7 // Fingerprint16 = 32 EC A7 B6 AC DB 65 A6 F6 F6 55 DD 1C DC FF 61
8 //
9 // This file may be distributed under the terms of the Q Public License
10 // version 1.0 as appearing in the file LICENSE.QPL included in the
11 // packaging of this file.
12 //
13 
18 #ifndef LIBCWD_PRIVATE_THREADING_H
19 #define LIBCWD_PRIVATE_THREADING_H
20 
21 #define LIBCWD_DEBUGDEBUGRWLOCK 0
22 
23 #if LIBCWD_DEBUGDEBUGRWLOCK
24 #define LIBCWD_NO_INTERNAL_STRING
25 #include <raw_write.h>
26 #undef LIBCWD_NO_INTERNAL_STRING
27 extern pthread_mutex_t LIBCWD_DEBUGDEBUGLOCK_CERR_mutex;
28 extern unsigned int LIBCWD_DEBUGDEBUGLOCK_CERR_count;
29 #define LIBCWD_DEBUGDEBUGRWLOCK_CERR(x) \
30  do { \
31  pthread_mutex_lock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
32  FATALDEBUGDEBUG_CERR(x); \
33  pthread_mutex_unlock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
34  } while(0)
35 #define LIBCWD_DEBUGDEBUGLOCK_CERR(x) \
36  do { \
37  if (instance != static_tsd_instance) \
38  { \
39  pthread_mutex_lock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
40  ++LIBCWD_DEBUGDEBUGLOCK_CERR_count; \
41  FATALDEBUGDEBUG_CERR("[" << LIBCWD_DEBUGDEBUGLOCK_CERR_count << "] " << pthread_self() << ": " << x); \
42  pthread_mutex_unlock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
43  } \
44  } while(0)
45 #else // !LIBCWD_DEBUGDEBUGRWLOCK
46 #define LIBCWD_DEBUGDEBUGRWLOCK_CERR(x) do { } while(0)
47 #define LIBCWD_DEBUGDEBUGLOCK_CERR(x) do { } while(0)
48 #endif // !LIBCWD_DEBUGDEBUGRWLOCK
49 
50 #ifndef LIBCWD_PRIVATE_SET_ALLOC_CHECKING_H
52 #endif
53 #ifndef LIBCWD_PRIVATE_STRUCT_TSD_H
55 #endif
56 #ifndef LIBCWD_PRIVATE_MUTEX_INSTANCES_H
58 #endif
59 #ifndef LIBCWD_CORE_DUMP_H
60 #include <libcwd/core_dump.h>
61 #endif
62 #ifndef LIBCW_CSTRING
63 #define LIBCW_CSTRING
64 #include <cstring> // Needed for std::memset and std::memcpy.
65 #endif
66 
67 #ifdef LIBCWD_HAVE_PTHREAD
68 #ifdef __linux
69 #ifndef _GNU_SOURCE
70 #error "You need to use define _GNU_SOURCE in order to make use of the extensions of Linux Threads."
71 #endif
72 #endif
73 #ifndef LIBCW_PTHREAD_H
74 #define LIBCW_PTHREAD_H
75 #include <pthread.h>
76 #endif
77 #if defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) && defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP)
78 #define LIBCWD_USE_LINUXTHREADS 1
79 #else
80 #define LIBCWD_USE_POSIX_THREADS 1
81 #endif
82 #else
83 #if LIBCWD_THREAD_SAFE
84 #error Fatal error: thread support was not detected during configuration of libcwd (did you use --disable-threading?)! \
85  How come you are trying to compile a threaded program now? \
86  To fix this problem, either link with libcwd_r (install it), or when you are indeed compiling a \
87  single threaded application, then get rid of the -D_REENTRANT and/or -D_THREAD_SAFE in your compile flags.
88 #endif
89 #endif // LIBCWD_HAVE_PTHREAD
90 
91 #ifndef LIBCWD_USE_LINUXTHREADS
92 #define LIBCWD_USE_LINUXTHREADS 0
93 #endif
94 #ifndef LIBCWD_USE_POSIX_THREADS
95 #define LIBCWD_USE_POSIX_THREADS 0
96 #endif
97 
98 #if CWDEBUG_DEBUGT
99 #define LibcwDebugThreads(x) do { x; } while(0)
100 #else
101 #define LibcwDebugThreads(x) do { } while(0)
102 #endif
103 
104 #if CWDEBUG_DEBUGT || CWDEBUG_DEBUG
105 #ifndef LIBCWD_PRIVATE_ASSERT_H
106 #include <libcwd/private_assert.h>
107 #endif
108 #endif
109 
110 #if LIBCWD_THREAD_SAFE
111 
112 namespace libcwd {
113 
114 #if LIBCWD_DEBUGDEBUGRWLOCK
// Debug-only stream formatter: dumps the internal state of a pthread_mutex_t
// onto the raw (malloc-free) debug stream raw_write_nt.
// NOTE(review): this reads LinuxThreads-internal members (__m_reserved,
// __m_count, __m_owner, __m_kind, __m_lock.__status, __m_lock.__spinlock);
// it will not compile against NPTL.  That is acceptable because the whole
// function is compiled only when LIBCWD_DEBUGDEBUGRWLOCK is set -- confirm
// before enabling that macro on a modern glibc.
115 inline
116 _private_::raw_write_nt const&
117 operator<<(_private_::raw_write_nt const& raw_write, pthread_mutex_t const& mutex)
118 {
119  raw_write << "(pthread_mutex_t&)" << (void*)&mutex <<
120  " = { __m_reserved = " << mutex.__m_reserved <<
121  ", __m_count = " << mutex.__m_count <<
122  ", __m_owner = " << (void*)mutex.__m_owner <<
123  ", __m_kind = " << mutex.__m_kind <<
124  ", __m_lock = { __status = " << mutex.__m_lock.__status <<
125  ", __spinlock = " << mutex.__m_lock.__spinlock << " } }";
126  return raw_write;
127 }
128 #endif
129 
130  namespace _private_ {
131 
132 extern void initialize_global_mutexes(void);
133 extern bool WST_multi_threaded;
134 
135 #if CWDEBUG_DEBUGT
136 extern void test_for_deadlock(size_t, struct TSD_st&, void const*);
// Convenience overload for integral lock-instance ids: widens the id to
// size_t and forwards to the real test_for_deadlock().  The assert keeps
// integral ids below 0x10000 so they cannot collide with the pointer-based
// overload below (which requires values >= 0x10000).
inline void test_for_deadlock(int instance, struct TSD_st& __libcwd_tsd, void const* from)
{
  assert(instance < 0x10000);
  size_t const id_as_index = static_cast<size_t>(instance);
  test_for_deadlock(id_as_index, __libcwd_tsd, from);
}
142 inline void test_for_deadlock(void const* ptr, struct TSD_st& __libcwd_tsd, void const* from)
143 {
144  assert(reinterpret_cast<size_t>(ptr) >= 0x10000);
145  test_for_deadlock(reinterpret_cast<size_t>(ptr), __libcwd_tsd, from);
146 }
147 #endif
148 
149 //===================================================================================================
150 //
151 // Mutex locking.
152 //
153 // template <int instance> This class may not use system calls (it may not call malloc(3)).
154 // class mutex_tct;
155 //
156 // Usage.
157 //
158 // Global mutexes can be initialized once, before using the mutex.
159 // mutex_tct<instance_id_const>::initialize();
160 //
161 // Static mutexes in functions (or templates) that can not globally
162 // be initialized need to call `initialize()' prior to *each* use
163 // (using -O2 this is at most a single test and nothing at all when
164 // Linuxthreads are being used).
165 //
166 
167 //========================================================================================================================================17"
168 // class mutex_tct
169 
170 #if LIBCWD_USE_POSIX_THREADS || LIBCWD_USE_LINUXTHREADS
171 // We have to use macros because pthread_cleanup_push and pthread_cleanup_pop
172 // are macros with an unmatched '{' and '}' respectively.
173 #define LIBCWD_DISABLE_CANCEL \
174  { \
175  LIBCWD_DISABLE_CANCEL_NO_BRACE
176 #define LIBCWD_DISABLE_CANCEL_NO_BRACE \
177  int __libcwd_oldstate; \
178  pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__libcwd_oldstate); \
179  LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_disabled )
180 #if CWDEBUG_ALLOC
181 #define LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE \
182  /* pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL) will call, */ \
183  /* and pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) can call, */ \
184  /* __pthread_do_exit() when the thread is cancelled in the meantime. */ \
185  /* This might free allocations that are allocated in userspace. */ \
186  LIBCWD_ASSERT( !__libcwd_tsd.internal || __libcwd_tsd.cancel_explicitely_disabled || __libcwd_tsd.cancel_explicitely_deferred )
187 #else
188 #define LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE
189 #endif
190 #define LIBCWD_ENABLE_CANCEL_NO_BRACE \
191  LibcwDebugThreads(\
192  LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_disabled > 0 ); \
193  --__libcwd_tsd.cancel_explicitely_disabled; \
194  LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE; \
195  ); \
196  pthread_setcancelstate(__libcwd_oldstate, NULL)
197 #define LIBCWD_ENABLE_CANCEL \
198  LIBCWD_ENABLE_CANCEL_NO_BRACE; \
199  }
200 
201 #define LIBCWD_DEFER_CANCEL \
202  { \
203  LIBCWD_DEFER_CANCEL_NO_BRACE
204 #define LIBCWD_DEFER_CANCEL_NO_BRACE \
205  int __libcwd_oldtype; \
206  pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &__libcwd_oldtype); \
207  LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_deferred )
208 #define LIBCWD_RESTORE_CANCEL_NO_BRACE \
209  LibcwDebugThreads(\
210  LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred > 0 ); \
211  --__libcwd_tsd.cancel_explicitely_deferred; \
212  LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE; \
213  ); \
214  pthread_setcanceltype(__libcwd_oldtype, NULL)
215 #define LIBCWD_RESTORE_CANCEL \
216  LIBCWD_RESTORE_CANCEL_NO_BRACE; \
217  }
218 
219 #if LIBCWD_USE_LINUXTHREADS
220 #define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg) \
221  pthread_cleanup_push_defer_np(reinterpret_cast<void(*)(void*)>(routine), reinterpret_cast<void*>(arg)); \
222  LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_deferred; ++__libcwd_tsd.cleanup_handler_installed )
223 #if CWDEBUG_ALLOC
224 #define LIBCWD_ASSERT_NONINTERNAL LIBCWD_ASSERT( !__libcwd_tsd.internal )
225 #else
226 #define LIBCWD_ASSERT_NONINTERNAL
227 #endif
228 #define LIBCWD_CLEANUP_POP_RESTORE(execute) \
229  LibcwDebugThreads( --__libcwd_tsd.cleanup_handler_installed; \
230  LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred > 0 ); \
231  LIBCWD_ASSERT_NONINTERNAL; ); \
232  pthread_cleanup_pop_restore_np(static_cast<int>(execute)); \
233  LibcwDebugThreads( --__libcwd_tsd.cancel_explicitely_deferred; )
234 #else // !LIBCWD_USE_LINUXTHREADS
235 #define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg) \
236  LIBCWD_DEFER_CANCEL; \
237  LibcwDebugThreads( ++__libcwd_tsd.cleanup_handler_installed ); \
238  pthread_cleanup_push(reinterpret_cast<void(*)(void*)>(routine), reinterpret_cast<void*>(arg))
239 #define LIBCWD_CLEANUP_POP_RESTORE(execute) \
240  LibcwDebugThreads( --__libcwd_tsd.cleanup_handler_installed ); \
241  pthread_cleanup_pop(static_cast<int>(execute)); \
242  LIBCWD_RESTORE_CANCEL
243 #endif // !LIBCWD_USE_LINUXTHREADS
244 
245 #define LIBCWD_PUSH_DEFER_TRYLOCK_MUTEX(instance, unlock_routine) \
246  LIBCWD_DEFER_CLEANUP_PUSH(static_cast<void (*)(void)>(unlock_routine), &::libcwd::_private_::mutex_tct<(instance)>::S_mutex); \
247  bool __libcwd_lock_successful = ::libcwd::_private_::mutex_tct<(instance)>::trylock()
248 #define LIBCWD_DEFER_PUSH_LOCKMUTEX(instance, unlock_routine) \
249  LIBCWD_DEFER_CLEANUP_PUSH(static_cast<void (*)(void)>(unlock_routine), &::libcwd::_private_::mutex_tct<(instance)>::S_mutex); \
250  ::libcwd::_private_::mutex_tct<(instance)>::lock(); \
251  bool const __libcwd_lock_successful = true
252 #define LIBCWD_UNLOCKMUTEX_POP_RESTORE(instance) \
253  LIBCWD_CLEANUP_POP_RESTORE(__libcwd_lock_successful)
254 
255 #define LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED \
256  LibcwDebugThreads( \
257  if (instance != static_tsd_instance) \
258  { \
 259  /* When entering a critical area, make sure that we have explicitly deferred cancellation of this */ \
260  /* thread (or disabled that) because when cancellation would happen in the middle of the critical */ \
261  /* area then the lock would stay locked. */ \
262  LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred || __libcwd_tsd.cancel_explicitely_disabled ); \
263  } )
264 
// mutex_tct<instance>
//
// A mutex selected at compile time by its `instance' id.  This class may not
// use system calls (it may not call malloc(3)): with LinuxThreads S_mutex is
// statically initialized, otherwise S_initialize() runs pthread_mutex_init()
// once before first use.  When CWDEBUG_DEBUG / CWDEBUG_DEBUGT are set, the
// global bookkeeping arrays instance_locked[], locked_by[] and locked_from[]
// are maintained for consistency checks and deadlock detection.
265 template <int instance>
266  class mutex_tct {
267  public:
268  static pthread_mutex_t S_mutex;
269 #if !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
270  protected:
271  static bool volatile S_initialized;
272  static void S_initialize(void);
273 #endif
274  public:
     // Idempotent; a complete no-op with LinuxThreads (static initializer).
     // The unlocked test of S_initialized is safe because the flag is only
     // set after initialization really completed (see comment below).
275  static void initialize(void)
276 #if LIBCWD_USE_LINUXTHREADS && !CWDEBUG_DEBUGT
277  { }
278 #else
279  {
280  if (S_initialized) // Check if the static `S_mutex' already has been initialized.
281  return; // No need to lock: `S_initialized' is only set after it is
282  // really initialized.
283  S_initialize();
284  }
285 #endif
286  public:
     // Try to obtain the lock without blocking.  Returns true iff the lock
     // was obtained; on success the debug bookkeeping is updated.
287  static bool trylock(void)
288  {
289  LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
290 #if CWDEBUG_DEBUGT
291  LIBCWD_TSD_DECLARATION;
292 #endif
293  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
294  LIBCWD_DEBUGDEBUGLOCK_CERR("Trying to lock mutex " << instance << " (" << (void*)&S_mutex << ") from " << __builtin_return_address(0) << " from " << __builtin_return_address(1));
295  LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_trylock(" << S_mutex << ").");
296  bool success = (pthread_mutex_trylock(&S_mutex) == 0);
297  LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << success << ". Mutex now " << S_mutex << ".");
298 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
299  if (success)
300  {
301 #if CWDEBUG_DEBUGT
302  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
303 #endif
304  LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::trylock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
305  instance_locked[instance] += 1;
306 #if CWDEBUG_DEBUGT
307  locked_by[instance] = pthread_self();
308  locked_from[instance] = __builtin_return_address(0);
309 #endif
310  }
311 #endif
312  LibcwDebugThreads( if (success) { ++__libcwd_tsd.inside_critical_area; } );
313  return success;
314  }
     // Block until the lock is obtained.
315  static void lock(void)
316  {
317  LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
318 #if CWDEBUG_DEBUGT
319  TSD_st* tsd_ptr = 0;
320  if (instance != static_tsd_instance)
321  {
322  LIBCWD_TSD_DECLARATION;
323  tsd_ptr = &__libcwd_tsd;
324  }
325  TSD_st& __libcwd_tsd(*tsd_ptr);
     // NOTE(review): when instance == static_tsd_instance the reference above
     // binds through a NULL pointer; every later use of __libcwd_tsd in this
     // function is guarded by the same condition, but this is formally
     // undefined behavior.
326 #endif
327  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
328  LibcwDebugThreads( if (instance != static_tsd_instance) { ++__libcwd_tsd.inside_critical_area; } );
329  LIBCWD_DEBUGDEBUGLOCK_CERR("locking mutex " << instance << " (" << (void*)&S_mutex << ") from " << __builtin_return_address(0) << " from " << __builtin_return_address(1));
330 #if CWDEBUG_DEBUGT
     // Record waiting_for_lock and run deadlock detection only for ordinary
     // instances.  NOTE(review): the excluded range
     // [2 * reserved_instance_low, 3 * reserved_instance_low) presumably
     // holds special (condition-variable related) instances -- confirm
     // against private_mutex_instances.h.
331  if (instance != static_tsd_instance && !(instance >= 2 * reserved_instance_low && instance < 3 * reserved_instance_low))
332  {
333  __libcwd_tsd.waiting_for_lock = instance;
334  LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_lock(" << S_mutex << ").");
335  int res = pthread_mutex_lock(&S_mutex);
336  LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
337  LIBCWD_ASSERT( res == 0 );
338  __libcwd_tsd.waiting_for_lock = 0;
339  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
340  }
341  else
342  {
343  LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_lock(" << S_mutex << ").");
344  int res = pthread_mutex_lock(&S_mutex);
345  LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
346  LIBCWD_ASSERT( res == 0 );
347  }
348 #else // !CWDEBUG_DEBUGT
349  pthread_mutex_lock(&S_mutex);
350 #endif // !CWDEBUG_DEBUGT
351  LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " obtained (" << (void*)&S_mutex << ").");
352 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
353  LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::lock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
354  instance_locked[instance] += 1;
355 #if CWDEBUG_DEBUGT
     // A different owner here means the bookkeeping (or locking) is broken:
     // abort via core_dump() so the state can be inspected.
356  if (locked_by[instance] != 0 && locked_by[instance] != pthread_self())
357  {
358  LIBCWD_DEBUGDEBUGLOCK_CERR("mutex " << instance << " (" << (void*)&S_mutex << ") is already set by another thread (" << locked_by[instance] << ")!");
359  core_dump();
360  }
361  locked_by[instance] = pthread_self();
362  locked_from[instance] = __builtin_return_address(0);
363 #endif
364 #endif
365  }
     // Release the lock.  With CWDEBUG_DEBUGT the pthread_mutex_unlock()
     // result is asserted to be 0 and the owner bookkeeping is cleared once
     // the (possibly recursive) lock count drops to zero.
366  static void unlock(void)
367  {
368 #if CWDEBUG_DEBUGT
369  TSD_st* tsd_ptr = 0;
370  if (instance != static_tsd_instance)
371  {
372  LIBCWD_TSD_DECLARATION;
373  tsd_ptr = &__libcwd_tsd;
374  }
375  TSD_st& __libcwd_tsd(*tsd_ptr);
376 #endif
377  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
378 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
379  LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
380  LIBCWD_ASSERT( instance_locked[instance] > 0 );
381 #if CWDEBUG_DEBUGT
382  if (locked_by[instance] != pthread_self())
383  {
384  LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking instance " << instance << " (" << (void*)&S_mutex << ") failed: locked_by[" << instance << "] == " << locked_by[instance] << ".");
385  core_dump();
386  }
387 #endif
388  instance_locked[instance] -= 1;
389 #if CWDEBUG_DEBUGT
390  if (instance_locked[instance] == 0)
391  {
392  locked_by[instance] = 0;
393  LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): locked_by[" << instance << "] was reset.");
394  }
395  else LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
396 #endif
397 #endif
398  LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking mutex " << instance << " (" << (void*)&S_mutex << ").");
399  LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_unlock(" << S_mutex << ").");
400 #if CWDEBUG_DEBUGT
401  int res =
402 #endif
403  pthread_mutex_unlock(&S_mutex);
404 #if CWDEBUG_DEBUGT
405  LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
406  LIBCWD_ASSERT(res == 0);
407 #endif
408  LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " released (" << (void*)&S_mutex << ").");
409  LibcwDebugThreads( if (instance != static_tsd_instance) { --__libcwd_tsd.inside_critical_area; } );
410  }
411  // This is used as cleanup handler with LIBCWD_DEFER_CLEANUP_PUSH.
412  static void cleanup(void*);
413  };
414 
415 #if !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
// Set (only) after S_mutex has really been initialized; read without locking
// in initialize(), which is safe exactly because of that ordering.
416 template <int instance>
417  bool volatile mutex_tct<instance>::S_initialized = false;
418 
// One-time initialization of S_mutex (compiled only when no static
// initializer is available, or when CWDEBUG_DEBUGT needs error-checking
// mutexes).  instance == mutex_initialization_instance bootstraps the mutex
// that conceptually guards initialization of all the others.
419 template <int instance>
420  void mutex_tct<instance>::S_initialize(void)
421  {
422  if (instance == mutex_initialization_instance) // Specialization.
423  {
424 #if !LIBCWD_USE_LINUXTHREADS
425  pthread_mutexattr_t mutex_attr;
426  pthread_mutexattr_init(&mutex_attr);
427 #if CWDEBUG_DEBUGT
428  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
429 #else
430  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_NORMAL);
431 #endif
432  pthread_mutex_init(&S_mutex, &mutex_attr);
433  pthread_mutexattr_destroy(&mutex_attr);
434 #endif // !LIBCWD_USE_LINUXTHREADS
435  S_initialized = true;
436  }
437  else // General case.
438  {
439  mutex_tct<mutex_initialization_instance>::initialize();
440  /* LIBCWD_DEFER_PUSH_LOCKMUTEX(mutex_initialization_instance, mutex_tct<mutex_initialization_instance>::unlock); */
     /* NOTE(review): the lock push/pop around this block is commented out,  */
     /* so the "now that we are locked" comment below no longer holds: the   */
     /* re-test of S_initialized runs without the initialization lock.       */
     /* Verify whether concurrent first-time initialization is possible here.*/
441  if (!S_initialized) // Check again now that we are locked.
442  {
443 #if !LIBCWD_USE_LINUXTHREADS
444  pthread_mutexattr_t mutex_attr;
445  pthread_mutexattr_init(&mutex_attr);
446  if (instance < end_recursive_types)
447  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
448  else
449  {
450 #if CWDEBUG_DEBUGT
451  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
452 #else
453  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_NORMAL);
454 #endif
455  }
456  pthread_mutex_init(&S_mutex, &mutex_attr);
457  pthread_mutexattr_destroy(&mutex_attr);
458 #endif // !LIBCWD_USE_LINUXTHREADS
459  S_initialized = true;
460  }
461  /* LIBCWD_UNLOCKMUTEX_POP_RESTORE(mutex_initialization_instance); */
462  }
463  }
464 #endif // !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
465 
// Definition of the mutex itself.  With LinuxThreads it is initialized
// statically (error-checking under CWDEBUG_DEBUGT, adaptive otherwise);
// with plain POSIX threads it stays uninitialized here and is set up by
// S_initialize().
466 template <int instance>
467  pthread_mutex_t mutex_tct<instance>::S_mutex
468 #if LIBCWD_USE_LINUXTHREADS
469  =
470 #if CWDEBUG_DEBUGT
471  PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
472 #else
473  PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
474 #endif
475 #else // !LIBCWD_USE_LINUXTHREADS
476  ;
477 #endif // !LIBCWD_USE_LINUXTHREADS
478 
479 template <int instance>
480  void mutex_tct<instance>::cleanup(void*)
481  {
482  unlock();
483  }
484 
485 //========================================================================================================================================17"
486 // class cond_tct
487 
// cond_tct<instance>
//
// A condition variable permanently paired with mutex_tct<instance> (its base
// class).  wait() must be called with that mutex locked: it updates the
// debug bookkeeping as if the mutex were unlocked, blocks in
// pthread_cond_wait() (which atomically releases and later re-acquires the
// mutex), then restores the bookkeeping.
488 template <int instance>
489  class cond_tct : public mutex_tct<instance> {
490  private:
491  static pthread_cond_t S_condition;
492 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
     // NOTE(review): this shadows mutex_tct<instance>::S_initialized; the two
     // flags are distinct objects.
493  static bool volatile S_initialized;
494  private:
495  static void S_initialize(void);
496 #endif
497  public:
     // Idempotent; a no-op with LinuxThreads and without CWDEBUG_DEBUGT.
498  static void initialize(void)
499 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
500  {
501  if (S_initialized)
502  return;
503  S_initialize();
504  }
505 #else
506  { }
507 #endif
508  public:
     // Wait on the condition; caller must hold the associated mutex.
509  void wait(void) {
510 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
     // About to release the mutex inside pthread_cond_wait(): undo the
     // "locked" bookkeeping first.
511  LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
512  LIBCWD_ASSERT( instance_locked[instance] > 0 );
513 #if CWDEBUG_DEBUGT
514  if (locked_by[instance] != pthread_self())
515  {
516  LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking instance " << instance << " (" << (void*)&S_mutex << ") failed: locked_by[" << instance << "] == " << locked_by[instance] << ".");
517  core_dump();
518  }
519 #endif
520  instance_locked[instance] -= 1;
521 #if CWDEBUG_DEBUGT
522  if (instance_locked[instance] == 0)
523  {
524  locked_by[instance] = 0;
525  LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): locked_by[" << instance << "] was reset.");
526  }
527  else LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
528 #endif
529 #endif
530  LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking mutex " << instance << " (" << (void*)&S_mutex << ").");
531  LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_cond_wait(" << (void*)&S_condition << ", " << this->S_mutex << ").");
532 #if CWDEBUG_DEBUGT
533  int res =
534 #endif
535  pthread_cond_wait(&S_condition, &this->S_mutex);
536 #if CWDEBUG_DEBUGT
537  LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
538  LIBCWD_ASSERT(res == 0);
539 #endif
540  LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " obtained (" << (void*)&S_mutex << ").");
541 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
     // pthread_cond_wait() returned with the mutex re-acquired: redo the
     // "locked" bookkeeping.
542  LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
543  instance_locked[instance] += 1;
544 #if CWDEBUG_DEBUGT
545  if (locked_by[instance] != 0 && locked_by[instance] != pthread_self())
546  {
547  LIBCWD_DEBUGDEBUGLOCK_CERR("mutex " << instance << " (" << (void*)&S_mutex << ") is already set by another thread (" << locked_by[instance] << ")!");
548  core_dump();
549  }
550  locked_by[instance] = pthread_self();
551  locked_from[instance] = __builtin_return_address(0);
552 #endif
553 #endif
554  }
     // Wake one waiter.
555  void signal(void) { pthread_cond_signal(&S_condition); }
     // Wake all waiters.
556  void broadcast(void) { pthread_cond_broadcast(&S_condition); }
557  };
558 
559 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
// One-time initialization of S_condition (under the initialization mutex)
// followed by initialization of the underlying mutex.
// NOTE(review): cond_tct<>::S_initialized is never set to true in this
// function (mutex_tct<instance>::S_initialize() sets the *base class* flag,
// which this one shadows), so initialize() appears to re-enter S_initialize()
// on every call -- and with LinuxThreads the whole guarded block below is
// compiled out.  Verify against the upstream libcwd sources whether an
// `S_initialized = true;` assignment is missing here.
560 template <int instance>
561  void cond_tct<instance>::S_initialize(void)
562  {
563 #if !LIBCWD_USE_LINUXTHREADS
564  mutex_tct<mutex_initialization_instance>::initialize();
565  LIBCWD_DEFER_PUSH_LOCKMUTEX(mutex_initialization_instance, mutex_tct<mutex_initialization_instance>::unlock);
566  if (!S_initialized) // Check again now that we are locked.
567  {
568  pthread_cond_init(&S_condition, NULL);
569  }
570  LIBCWD_UNLOCKMUTEX_POP_RESTORE(mutex_initialization_instance);
571 #endif
572  mutex_tct<instance>::S_initialize();
573  }
574 #endif // CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
575 
576 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
// Definition of cond_tct<>::S_initialized -- a distinct flag that shadows
// mutex_tct<>::S_initialized (see the NOTE(review) at cond_tct).
577 template <int instance>
578  bool volatile cond_tct<instance>::S_initialized = false;
579 #endif
580 
// Definition of the condition variable: statically initialized with
// LinuxThreads, otherwise left for S_initialize() / pthread_cond_init().
581 template <int instance>
582  pthread_cond_t cond_tct<instance>::S_condition
583 #if LIBCWD_USE_LINUXTHREADS
584  = PTHREAD_COND_INITIALIZER;
585 #else // !LIBCWD_USE_LINUXTHREADS
586  ;
587 #endif // !LIBCWD_USE_LINUXTHREADS
588 
589 #endif // LIBCWD_USE_POSIX_THREADS || LIBCWD_USE_LINUXTHREADS
590 
591 //========================================================================================================================================17"
592 // class rwlock_tct
593 
594 //
595 // template <int instance> This class may not use system calls (it may not call malloc(3)).
596 // class rwlock_tct;
597 //
598 // Read/write mutex lock implementation. Readers can set arbitrary number of locks, only locking
599 // writers. Writers lock readers and writers.
600 //
601 // Examples.
602 //
603 // rwlock_tct<instance_id_const>::initialize();
604 // if (rwlock_tct<instance_id_const>::tryrdlock()) ...
605 // if (rwlock_tct<instance_id_const>::trywrlock()) ...
606 // rwlock_tct<instance_id_const>::rdlock(); // Readers lock.
607 // rwlock_tct<instance_id_const>::rdunlock();
608 // rwlock_tct<instance_id_const>::wrlock(); // Writers lock.
609 // rwlock_tct<instance_id_const>::wrunlock();
610 // rwlock_tct<instance_id_const>::rd2wrlock(); // Convert read lock into write lock.
611 // rwlock_tct<instance_id_const>::wr2rdlock(); // Convert write lock into read lock.
612 //
613 
614 template <int instance>
615  class rwlock_tct {
616  private:
617  static int const readers_instance = instance + reserved_instance_low;
618  static int const holders_instance = instance + 2 * reserved_instance_low;
619  typedef cond_tct<holders_instance> cond_t;
620  static cond_t S_no_holders_condition;
621  static int S_holders_count; // Number of readers or -1 if a writer locked this object.
622  static bool volatile S_writer_is_waiting;
623  static pthread_t S_writer_id;
624 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
625  static bool S_initialized; // Set when initialized.
626 #endif
627  public:
628  static void initialize(void)
629  {
630 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
631  if (S_initialized)
632  return;
633  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling initialize() instance " << instance);
634  mutex_tct<readers_instance>::initialize();
635  S_no_holders_condition.initialize();
636  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving initialize() instance " << instance);
637  S_initialized = true;
638 #endif
639  }
640  static bool tryrdlock(void)
641  {
642 #if CWDEBUG_DEBUGT
643  LIBCWD_TSD_DECLARATION;
644 #endif
645  LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
646  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
647  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::tryrdlock()");
648  if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
649  {
650  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::tryrdlock() (skipped: thread has write lock)");
651  return true; // No error checking is done.
652  }
653  // Give a writer a higher priority (kinda fuzzy).
654  if (S_writer_is_waiting || !S_no_holders_condition.trylock())
655  return false;
656  bool success = (S_holders_count != -1);
657  if (success)
658  ++S_holders_count; // Add one reader.
659  S_no_holders_condition.unlock();
660  LibcwDebugThreads(
661  if (success)
662  {
663  ++__libcwd_tsd.inside_critical_area;
664  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
665  __libcwd_tsd.instance_rdlocked[instance] += 1;
666  if (__libcwd_tsd.instance_rdlocked[instance] == 1)
667  {
668  __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
669  __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
670  }
671  else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
672  {
673  __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
674  __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
675  }
676  else
677  core_dump();
678  }
679  );
680  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::tryrdlock()");
681  return success;
682  }
683  static bool trywrlock(void)
684  {
685  LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
686 #if CWDEBUG_DEBUGT
687  LIBCWD_TSD_DECLARATION;
688 #endif
689  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
690  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::trywrlock()");
691  bool success;
692  if ((success = mutex_tct<readers_instance>::trylock()))
693  {
694  S_writer_is_waiting = true;
695  if ((success = S_no_holders_condition.trylock()))
696  {
697  if ((success = (S_holders_count == 0)))
698  {
699  S_holders_count = -1; // Mark that we have a writer.
700  if (instance < end_recursive_types)
701  S_writer_id = pthread_self();
702 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
703 #if CWDEBUG_DEBUGT
704  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
705 #endif
706  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::trywrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
707  instance_locked[instance] += 1;
708 #if CWDEBUG_DEBUGT
709  locked_by[instance] = pthread_self();
710  locked_from[instance] = __builtin_return_address(0);
711 #endif
712 #endif
713  }
714  S_no_holders_condition.unlock();
715  }
716  S_writer_is_waiting = false;
717  mutex_tct<readers_instance>::unlock();
718  }
719  LibcwDebugThreads( if (success) { ++__libcwd_tsd.inside_critical_area; } );
720  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::trywrlock()");
721  return success;
722  }
723  static void rdlock(bool high_priority = false)
724  {
725  LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
726 #if CWDEBUG_DEBUGT
727  LIBCWD_TSD_DECLARATION;
728 #endif
729  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
730  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rdlock()");
731  if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
732  {
733  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdlock() (skipped: thread has write lock)");
734  return; // No error checking is done.
735  }
736  // Give a writer a higher priority (kinda fuzzy).
737  if (S_writer_is_waiting) // If there is a writer interested,
738  {
739  if (!high_priority)
740  {
741  mutex_tct<readers_instance>::lock(); // then give it precedence and wait here.
742  mutex_tct<readers_instance>::unlock();
743  }
744  }
745 #if CWDEBUG_DEBUGT
746  __libcwd_tsd.waiting_for_rdlock = instance;
747 #endif
748  S_no_holders_condition.lock();
749  while (S_holders_count == -1) // Writer locked it?
750  S_no_holders_condition.wait(); // Wait for writer to finish.
751 #if CWDEBUG_DEBUGT
752  __libcwd_tsd.waiting_for_rdlock = 0;
753 #endif
754  ++S_holders_count; // Add one reader.
755  S_no_holders_condition.unlock();
756  LibcwDebugThreads(
757  ++__libcwd_tsd.inside_critical_area;
758  // Thread A: rdlock<1> ... mutex<2>
759  // Thread B: mutex<2> ... rdlock<1>
760  // ^--- current program counter.
761  // can still lead to a deadlock when a third thread is trying to get the write lock
 762  // because trying to acquire a write lock immediately blocks new read locks.
763  // However, trying to acquire a write lock does not block high priority read locks,
764  // therefore the following is allowed:
765  // Thread A: rdlock<1> ... mutex<2>
766  // Thread B: mutex<2> ... high priority rdlock<1>
767  // provided that the write lock wrlock<1> is never used in combination with mutex<2>.
768  // In order to take this into account, we need to pass the information that this is
769  // a read lock to the test function.
770  _private_::test_for_deadlock(instance + (high_priority ? high_priority_read_lock_offset : read_lock_offset), __libcwd_tsd, __builtin_return_address(0));
771  __libcwd_tsd.instance_rdlocked[instance] += 1;
772  if (__libcwd_tsd.instance_rdlocked[instance] == 1)
773  {
774  __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
775  __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
776  }
777  else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
778  {
779  __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
780  __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
781  }
782  else
783  core_dump();
784  );
785  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdlock()");
786  }
// Release one read lock on this rwlock instance.
// If this thread currently owns the WRITE lock (only possible for
// recursive-type instances, see the instance < end_recursive_types test
// below), the call is a deliberate no-op: the matching rdlock() was
// skipped in the same way.
787  static void rdunlock(void)
788  {
789 #if CWDEBUG_DEBUGT
790  LIBCWD_TSD_DECLARATION;
791 #endif
792  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
793  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rdunlock()");
794  if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
795  {
796  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdunlock() (skipped: thread has write lock)");
797  return; // No error checking is done.
798  }
799  LibcwDebugThreads( --__libcwd_tsd.inside_critical_area );
  // Decrement the holders count under the condition's mutex; the last
  // reader out wakes up a thread blocked in wrlock()/rd2wrlock().
800  S_no_holders_condition.lock();
801  if (--S_holders_count == 0) // Was this the last reader?
802  S_no_holders_condition.signal(); // Tell waiting threads.
803  S_no_holders_condition.unlock();
  // Debug bookkeeping: forget which thread/return-address took this
  // read lock (mirrors the rdlocked_by1/rdlocked_by2 slots that
  // rdlock() filled in).
804  LibcwDebugThreads(
805  if (__libcwd_tsd.instance_rdlocked[instance] == 2)
806  __libcwd_tsd.rdlocked_by2[instance] = 0;
807  else
808  __libcwd_tsd.rdlocked_by1[instance] = 0;
809  __libcwd_tsd.instance_rdlocked[instance] -= 1;
810  );
811  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdunlock()");
812  }
// Acquire the write lock on this rwlock instance.
// Strategy: first take the readers_instance mutex and raise
// S_writer_is_waiting so that no NEW readers can enter, then wait on
// S_no_holders_condition until every current holder (reader or writer)
// has left, and finally mark ourselves as the writer by setting
// S_holders_count to -1.
813  static void wrlock(void)
814  {
815  LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
816 #if CWDEBUG_DEBUGT
817  LIBCWD_TSD_DECLARATION;
818 #endif
819  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
820  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wrlock()");
821  mutex_tct<readers_instance>::lock(); // Block new readers,
822  S_writer_is_waiting = true; // from this moment on.
823 #if CWDEBUG_DEBUGT
  // Record what we are blocking on, so test_for_deadlock() can see it.
824  __libcwd_tsd.waiting_for_lock = instance;
825 #endif
826  S_no_holders_condition.lock();
827  while (S_holders_count != 0) // Other readers or writers have this lock?
828  S_no_holders_condition.wait(); // Wait until all current holders are done.
829 #if CWDEBUG_DEBUGT
830  __libcwd_tsd.waiting_for_lock = 0;
831 #endif
832  S_writer_is_waiting = false; // Stop checking the lock for new readers.
833  mutex_tct<readers_instance>::unlock(); // Release blocked readers.
834  S_holders_count = -1; // Mark that we have a writer.
835  S_no_holders_condition.unlock();
  // Remember the owning thread so recursive-type instances can detect
  // "rdlock while holding wrlock" (see rdlock()/rdunlock()).
836  if (instance < end_recursive_types)
837  S_writer_id = pthread_self();
838  LibcwDebugThreads( ++__libcwd_tsd.inside_critical_area );
839 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
840 #if CWDEBUG_DEBUGT
841  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
842 #endif
843  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
844  instance_locked[instance] += 1;
845 #if CWDEBUG_DEBUGT
846  locked_by[instance] = pthread_self();
847  locked_from[instance] = __builtin_return_address(0);
848 #endif
849 #endif
850  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wrlock()");
851  }
// Release the write lock on this rwlock instance: clear the writer
// bookkeeping, reset S_holders_count to 0 under the condition's mutex
// and wake up threads blocked in rdlock()/wrlock()/rd2wrlock().
852  static void wrunlock(void)
853  {
854 #if CWDEBUG_DEBUGT
855  LIBCWD_TSD_DECLARATION;
856 #endif
857  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
858 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
859  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
860 #if CWDEBUG_DEBUGT
861  LIBCWD_ASSERT( instance_locked[instance] > 0 && locked_by[instance] == pthread_self() );
862 #endif
863  instance_locked[instance] -= 1;
864 #endif
865 #if CWDEBUG_DEBUGT
866  if (instance > end_recursive_types || instance_locked[instance] == 0)
867  {
868  locked_by[instance] = 0;
  // Message corrected: it used to read "rwlock_tct::unlock()", unlike
  // every sibling message which names the actual member function.
869  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): locked_by[" << instance << "] was reset.");
870  }
871  else
872  {
873  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
874  }
875 #endif
876  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wrunlock()");
877  LibcwDebugThreads( --__libcwd_tsd.inside_critical_area );
  // Forget the owning thread (only tracked for recursive-type instances).
878  if (instance < end_recursive_types)
879  S_writer_id = 0;
880  S_no_holders_condition.lock();
881  S_holders_count = 0; // We have no writer anymore.
882  S_no_holders_condition.signal(); // No readers and no writers left.
883  S_no_holders_condition.unlock();
884  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wrunlock()");
885  }
// Upgrade this thread's read lock to the write lock.
// Drops our own reader count; if other readers remain, blocks new
// readers (readers_instance mutex + S_writer_is_waiting) and waits
// until they are all gone, then claims the writer slot
// (S_holders_count = -1).
// NOTE(review): if two threads call rd2wrlock() concurrently, each
// waits for the other's read hold to disappear — presumably callers
// serialize upgrades on a given instance; verify at the call sites.
886  static void rd2wrlock(void)
887  {
888 #if CWDEBUG_DEBUGT
889  LIBCWD_TSD_DECLARATION;
890 #endif
891  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
892  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rd2wrlock()");
893 #if CWDEBUG_DEBUGT
894  __libcwd_tsd.waiting_for_lock = instance;
895 #endif
896  S_no_holders_condition.lock();
897  if (--S_holders_count > 0)
898  {
899  mutex_tct<readers_instance>::lock(); // Block new readers.
900  S_writer_is_waiting = true;
901  while (S_holders_count != 0)
902  S_no_holders_condition.wait();
903  S_writer_is_waiting = false;
904  mutex_tct<readers_instance>::unlock(); // Release blocked readers.
905  }
906 #if CWDEBUG_DEBUGT
907  __libcwd_tsd.waiting_for_lock = 0;
908 #endif
909  S_holders_count = -1; // We are a writer now.
910  S_no_holders_condition.unlock();
  // Remember the owning thread for recursive-type instances.
911  if (instance < end_recursive_types)
912  S_writer_id = pthread_self();
913 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
914 #if CWDEBUG_DEBUGT
915  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
916 #endif
917  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::rd2wrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
918  instance_locked[instance] += 1;
919 #if CWDEBUG_DEBUGT
920  locked_by[instance] = pthread_self();
921  locked_from[instance] = __builtin_return_address(0);
922 #endif
923 #endif
  // Debug bookkeeping: our read lock is gone now, drop its record.
924  LibcwDebugThreads(
925  if (__libcwd_tsd.instance_rdlocked[instance] == 2)
926  __libcwd_tsd.rdlocked_by2[instance] = 0;
927  else
928  __libcwd_tsd.rdlocked_by1[instance] = 0;
929  __libcwd_tsd.instance_rdlocked[instance] -= 1;
930  );
931  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rd2wrlock()");
932  }
// Downgrade this thread's write lock to a read lock, without ever
// releasing the lock completely: S_holders_count goes straight from
// -1 (writer) to 1 (one reader) while holding the condition's mutex.
933  static void wr2rdlock(void)
934  {
935 #if CWDEBUG_DEBUGT
936  LIBCWD_TSD_DECLARATION;
937 #endif
938  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
939 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
940  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
941 #if CWDEBUG_DEBUGT
942  LIBCWD_ASSERT( instance_locked[instance] > 0 && locked_by[instance] == pthread_self() );
943 #endif
944  instance_locked[instance] -= 1;
945 #if CWDEBUG_DEBUGT
  // NOTE(review): '>' not '>=' — for instance == end_recursive_types the
  // owner is only cleared once the count reaches 0; presumably intended
  // (wrunlock() uses the same test), but verify.
946  if (instance > end_recursive_types || instance_locked[instance] == 0)
947  {
948  locked_by[instance] = 0;
949  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): locked_by[" << instance << "] was reset.");
950  }
951  else
952  {
953  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
954  }
955 #endif
956 #endif
957  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wr2rdlock()");
958  if (instance < end_recursive_types)
959  S_writer_id = 0;
960  S_no_holders_condition.lock();
961  S_holders_count = 1; // Turn writer into a reader (atomic operation).
962  S_no_holders_condition.signal();
963  S_no_holders_condition.unlock();
  // Debug bookkeeping: record the new read lock exactly as rdlock()
  // would (at most two simultaneous read locks per thread are tracked).
964  LibcwDebugThreads(
965  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
966  if (instance >= instance_rdlocked_size)
967  core_dump();
968  __libcwd_tsd.instance_rdlocked[instance] += 1;
969  if (__libcwd_tsd.instance_rdlocked[instance] == 1)
970  {
971  __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
972  __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
973  }
974  else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
975  {
976  __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
977  __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
978  }
979  else
980  core_dump();
981  );
982  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wr2rdlock()");
983  }
984  // This is used as cleanup handler with LIBCWD_DEFER_CLEANUP_PUSH.
985  static void cleanup(void*);
986  };
987 
// Number of current lock holders: 0 = free, N > 0 = N readers,
// -1 = one writer (see wrlock()/rdunlock()). Modified under
// S_no_holders_condition's mutex.
988 template <int instance>
989  int rwlock_tct<instance>::S_holders_count = 0;
990 
// Set while a thread is blocked in wrlock()/rd2wrlock(); new readers
// check it so a waiting writer is not starved.
// Use the bool literal rather than the int literal 0 for a bool member.
991 template <int instance>
992  bool volatile rwlock_tct<instance>::S_writer_is_waiting = false;
993 
// Thread id of the current writer; only maintained for recursive-type
// instances (instance < end_recursive_types), compared with
// pthread_equal() in rdlock()/rdunlock().
// NOTE(review): initializing/resetting a pthread_t with 0 assumes an
// arithmetic pthread_t (true on LinuxThreads/NPTL) — not portable in
// general; verify for other targets.
994 template <int instance>
995  pthread_t rwlock_tct<instance>::S_writer_id = 0;
996 
// True once this instance's primitives have been initialized
// (asserted at the top of wrlock()).
// Use the bool literal rather than the int literal 0 for a bool member.
997 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
998 template <int instance>
999  bool rwlock_tct<instance>::S_initialized = false;
1000 #endif
1001 
// Condition variable (with its own mutex) protecting S_holders_count;
// signalled whenever the holder count drops to zero.
1002 template <int instance>
1003  typename rwlock_tct<instance>::cond_t rwlock_tct<instance>::S_no_holders_condition;
1004 
1005 template <int instance>
1006  void rwlock_tct<instance>::cleanup(void*)
1007  {
1008  if (S_holders_count == -1)
1009  wrunlock();
1010  else
1011  rdunlock();
1012  }
1013 
1014 extern void fatal_cancellation(void*);
1015 
1016  } // namespace _private_
1017 } // namespace libcwd
1018 
1019 #else // !LIBCWD_THREAD_SAFE
1020 #define LIBCWD_DISABLE_CANCEL
1021 #define LIBCWD_DISABLE_CANCEL_NO_BRACE
1022 #define LIBCWD_ENABLE_CANCEL_NO_BRACE
1023 #define LIBCWD_ENABLE_CANCEL
1024 #define LIBCWD_DEFER_CANCEL
1025 #define LIBCWD_DEFER_CANCEL_NO_BRACE
1026 #define LIBCWD_RESTORE_CANCEL_NO_BRACE
1027 #define LIBCWD_RESTORE_CANCEL
1028 #define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg)
1029 #define LIBCWD_CLEANUP_POP_RESTORE(execute)
1030 #define LIBCWD_PUSH_DEFER_TRYLOCK_MUTEX(instance, unlock_routine)
1031 #define LIBCWD_DEFER_PUSH_LOCKMUTEX(instance, unlock_routine)
1032 #define LIBCWD_UNLOCKMUTEX_POP_RESTORE(instance)
1033 #define LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED
1034 #endif // LIBCWD_THREAD_SAFE
1035 #endif // LIBCWD_PRIVATE_THREADING_H
1036 
std::ostream & operator<<(std::ostream &os, memblk_types_nt memblk_type)
Allow writing a memblk_types_nt directly to an ostream.
Definition: debugmalloc.cc:675
void core_dump(void)
Dump core of current thread.
Definition: debug.cc:805
Copyright © 2001 - 2004 Carlo Wood.  All rights reserved.