
private_allocator.h
1 // $Header$
2 //
3 // Copyright (C) 2001 - 2004, by
4 //
5 // Carlo Wood, Run on IRC <carlo@alinoe.com>
6 // RSA-1024 0x624ACAD5 1997-01-26 Sign & Encrypt
7 // Fingerprint16 = 32 EC A7 B6 AC DB 65 A6 F6 F6 55 DD 1C DC FF 61
8 //
9 // This file may be distributed under the terms of the Q Public License
10 // version 1.0 as appearing in the file LICENSE.QPL included in the
11 // packaging of this file.
12 //
13 
18 #ifndef LIBCWD_PRIVATE_ALLOCATOR_H
19 #define LIBCWD_PRIVATE_ALLOCATOR_H
20 
21 #ifndef LIBCWD_CONFIG_H
22 #include <libcwd/config.h>
23 #endif
24 
25 #if CWDEBUG_ALLOC // This file is not used when --disable-alloc was used.
26 
27 #ifndef LIBCWD_PRIVATE_MUTEX_INSTANCES_H
28 #include <libcwd/private_mutex_instances.h>
29 #endif
30 #ifndef LIBCWD_CORE_DUMP_H
31 #include <libcwd/core_dump.h>
32 #endif
33 #ifndef LIBCW_CSTDDEF
34 #define LIBCW_CSTDDEF
35 #include <cstddef> // Needed for size_t
36 #endif
37 #if __GNUC__ > 3 && LIBCWD_THREAD_SAFE
38 #include <libcwd/private_mutex.h> // mutex_ct
39 #endif
40 #include <memory>
41 #include <limits>
42 
43 //===================================================================================================
44 // Allocators
45 //
46 //
47 
48 /* The allocators used by libcwd have the following characteristics:
49 
50  1) The type T that is being allocated and deallocated.
51  2) Whether or not the allocation is internal, auto-internal or in userspace.
52  3) The pool instance from which the allocation should be drawn.
53  4) Whether or not a lock is needed for this pool.
54  5) Whether or not this allocation belongs to a libcwd
55  critical area and if so, which one.
56 
57  Note that each critical area (if any) uses its own lock and
58  therefore no (additional) lock will be needed for the allocator.
59  Otherwise a lock is always needed (in the multi-threaded case).
60  As of gcc 4.0, the pool allocator that is used no longer uses locks
61  but keeps a separate pool per thread (plus one common pool); for us
62  this need is equivalent to needing a lock or not: if we don't need
63  a lock, then there is also no need to separate pools per thread.
64 
65  There are five different allocators in use by libcwd:
66 
67 Multi-threaded case:
68 
69   Allocator name           | internal | Pool instance                     | Needs lock
70   ---------------------------------------------------------------------------------------------------
71   memblk_map_allocator     | yes      | memblk_map_instance               | no (memblk_map_instance critical area)
72   object_files_allocator   | yes      | object_files_instance             | no (object_files_instance critical area)
73   internal_allocator       | yes      | multi_threaded_internal_instance  | yes
74   auto_internal_allocator  | auto     | multi_threaded_internal_instance  | yes
75   userspace_allocator      | no       | userspace_instance                | yes
76 
77 Single-threaded case:
78 
79   Allocator name           | internal | Pool instance                     | Needs lock
80   ---------------------------------------------------------------------------------------------------
81   memblk_map_allocator     | yes      | single_threaded_internal_instance | no
82   object_files_allocator   | yes      | single_threaded_internal_instance | no
83   internal_allocator       | yes      | single_threaded_internal_instance | no
84   auto_internal_allocator  | auto     | single_threaded_internal_instance | no
85   userspace_allocator      | no       | std::alloc                        | -
86 
87 */
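// [Editor's illustration, not part of the original header: how one row of the multi-threaded
//  table above materializes further down in this file.  In the threaded case memblk_map_allocator
//  (typedef'd near the end) expands, via LIBCWD_NS_INTERNAL_ALLOCATOR and
//  LIBCWD_CHARALLOCATOR_INTERNAL, to roughly
//
//    allocator_adaptor<char,
//                      CharPoolAlloc<true /* the pool itself takes a lock, see below */,
//                                    memblk_map_instance>,
//                      internal_pool>
//
//  so the "Needs lock: no" column refers to the *use* of the allocator (already serialized by
//  the memblk_map_instance critical area), while the underlying pool still locks because it is
//  shared with critical areas of other mutexes.]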
88 
89 #if __GNUC__ == 3 && __GNUC_MINOR__ == 4
90 #include <ext/pool_allocator.h> // __gnu_cxx::__pool_alloc
91 #endif
92 
93 namespace libcwd {
94  namespace _private_ {
95 
96 // This is a random number in the hope nobody else uses it.
97 int const random_salt = 327665;
98 
99 // Dummy mutex instance numbers, these must be negative.
100 int const multi_threaded_internal_instance = -1;
101 int const single_threaded_internal_instance = -2;
102 int const userspace_instance = -3;
103 
104 // Definition of CharPoolAlloc.
105 #if __GNUC__ == 3 && __GNUC_MINOR__ < 4
106 template<bool needs_lock, int pool_instance>
107  struct CharPoolAlloc : public std::__default_alloc_template<needs_lock, random_salt + pool_instance> {
108  typedef char* pointer;
109  };
110 #elif __GNUC__ == 3 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ == 0
111 template<bool needs_lock, int pool_instance>
112  struct CharPoolAlloc : public __gnu_cxx::__pool_alloc<needs_lock, random_salt + pool_instance> {
113  typedef char* pointer;
114  };
115 #elif __GNUC__ == 3
116 // gcc 3.4.1 and higher.
117 template<int pool_instance>
118  struct char_wrapper {
119  char c;
120  };
121 // gcc 3.4.1 and 3.4.2 always use a lock, in the threaded case.
122 template<bool needs_lock, int pool_instance>
123  class CharPoolAlloc : public __gnu_cxx::__pool_alloc<char_wrapper<pool_instance> > { };
124 #else // gcc 4.0 and higher.
125 // Sometimes reusing code isn't possible anymore.
126 
127 static size_t const maximum_size_exp = 10; // The log2 of the maximum size that is
128  // allocated in a pool. Larger sizes are
129  // allocated directly with operator new.
130 static size_t const maximum_size = (1U << maximum_size_exp); // 1024 bytes.
131 
132 struct Node {
133  Node* M_next;
134  Node* M_prev;
135 
136  Node* next(void) const { return M_next; }
137  Node* prev(void) const { return M_prev; }
138 
139  void unlink(void)
140  {
141  M_prev->M_next = M_next;
142  M_next->M_prev = M_prev;
143  }
144 };
145 
146 // The log2 of minimum_size. 2^(minimum_size_exp - 1) < sizeof(Node) <= 2^minimum_size_exp.
147 template <unsigned int N> struct log2 { enum { result = 1 + log2<N/2>::result }; };
148 template<> struct log2<0> { enum { result = -1 }; };
149 static size_t const minimum_size_exp = log2<sizeof(Node) - 1>::result + 1; // Calculate rounded up log2 value.
150 
151 static size_t const minimum_size = (1U << minimum_size_exp); // The minimum chunk size, must be a power of 2.
152 // The number of different buckets (with respective chunk sizes: 8, 16, 32, 64, 128, 256, 512 and 1024).
153 static int const bucket_sizes = maximum_size_exp - minimum_size_exp + 1;
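// [Editor's worked example, assuming a 64-bit platform where sizeof(Node) == 16 (two pointers):
//  log2<15>::result == 3, so minimum_size_exp == 4, minimum_size == 16 and
//  bucket_sizes == 10 - 4 + 1 == 7 (chunk sizes 16, 32, ..., 1024).  On a 32-bit platform
//  sizeof(Node) == 8, giving the eight buckets 8, 16, ..., 1024 mentioned above.
//  Two compile-time sanity checks in the idiom of the era (a negative array size fails to
//  compile); the typedef names are the editor's own:]
typedef char example_minimum_size_is_a_power_of_two[(minimum_size & (minimum_size - 1)) == 0 ? 1 : -1];
typedef char example_node_fits_in_minimum_size[sizeof(Node) <= minimum_size ? 1 : -1];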
154 
155 struct List : public Node {
156  bool empty(void) const { return M_next == this; }
157  void insert(Node* node)
158  {
159  node->M_prev = this;
160  node->M_next = M_next;
161  M_next->M_prev = node;
162  M_next = node;
163  }
164  void insert_back(Node* node)
165  {
166  node->M_prev = M_prev;
167  node->M_next = this;
168  M_prev->M_next = node;
169  M_prev = node;
170  }
171 private:
172  using Node::next;
173  using Node::prev;
174 };
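// [Editor's usage sketch; the function name and variables are hypothetical and only the Node
//  and List members declared above are assumed.  Note that List has no constructor: an empty
//  list is one whose M_next and M_prev point at itself.]
inline void example_intrusive_list_usage(void)
{
  List head;
  head.M_next = head.M_prev = &head;    // empty() is now true: M_next == this
  Node n;
  head.insert(&n);                      // O(1) push front: head <-> n <-> head
  // head.empty() is now false.
  n.unlink();                           // O(1) removal; no traversal needed
  // head.empty() is true again.
}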
175 
176 struct ChunkNode : public Node {
177  // This is commented out because it is 'virtual' (the padding can be zero bytes too):
178  // char M_padding[sizeof(ChunkNode) - sizeof(Node)];
179 
180  ChunkNode* next(void) const { return static_cast<ChunkNode*>(M_next); }
181  ChunkNode* prev(void) const { return static_cast<ChunkNode*>(M_prev); }
182 };
183 
184 struct ChunkList : public List {
185  unsigned int M_used_count; // Number of _used_ chunks (that is, chunks that are allocated and no longer in this list).
186  ChunkNode* begin(void) const { return static_cast<ChunkNode*>(M_next); }
187  Node const* end(void) const { return this; }
188 };
189 
190 struct BlockNode : public Node {
191  ChunkList M_chunks;
192  ChunkNode M_data[1]; // One or more Chunks.
193 
194  BlockNode* next(void) const { return static_cast<BlockNode*>(M_next); }
195  BlockNode* prev(void) const { return static_cast<BlockNode*>(M_prev); }
196 };
197 
198 struct BlockList : public List {
199  unsigned int* M_count_ptr; // Pointer to the number of blocks (that is, blocks in the full + notfull lists).
200  unsigned short M_internal; // Whether or not this block list contains internal blocks.
201 
202  BlockNode* begin(void) const { return static_cast<BlockNode*>(M_next); }
203  Node const* end(void) const { return this; }
204 
205  void initialize(unsigned int* count_ptr, unsigned short internal);
206  void uninitialize(void);
207  ~BlockList() { uninitialize(); }
208 #if CWDEBUG_DEBUG
209  void consistency_check(void);
210 #endif
211 };
212 
213 struct TSD_st;
214 
215 struct FreeList {
216 #if LIBCWD_THREAD_SAFE
217  pthread_mutex_t M_mutex;
218  static pthread_mutex_t S_mutex;
219 #endif
220  bool M_initialized;
221  unsigned int M_count[bucket_sizes]; // Number of blocks (in the full+notfull list).
222  unsigned short M_keep[bucket_sizes]; // Number of blocks that shouldn't be freed.
223  BlockList M_list_notfull[bucket_sizes];
224  BlockList M_list_full[bucket_sizes];
225 
226 #if LIBCWD_THREAD_SAFE
227  void initialize(TSD_st& __libcwd_tsd);
228 #else
229  void initialize(void);
230 #endif
231  void uninitialize(void);
232  ~FreeList() { uninitialize(); }
233  char* allocate(int power, size_t size);
234  void deallocate(char* p, int power, size_t size);
235 #if CWDEBUG_DEBUG
236  void consistency_check(void);
237 #endif
238 };
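// [Editor's sketch: this header does not say how callers derive the 'power' argument of
//  FreeList::allocate() and deallocate().  Assuming 'power' is the log2 of the chunk size the
//  request is rounded up to, a plausible caller-side mapping is shown below; the function name
//  is the editor's own.  Requests larger than maximum_size would bypass the pool entirely.]
inline int example_size_to_power(size_t size)
{
  int power = (int)minimum_size_exp;
  while (power <= (int)maximum_size_exp && (size_t(1) << power) < size)
    ++power;                            // round up to the next power of two
  return power;                         // minimum_size_exp .. maximum_size_exp,
                                        // or maximum_size_exp + 1 when the request is too large
}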
239 
240 template<bool needs_lock, int pool_instance>
241  class CharPoolAlloc {
242  private:
243  static FreeList S_freelist;
244 
245  public:
246  // Type definitions.
247  typedef char value_type;
248  typedef size_t size_type;
249  typedef ptrdiff_t difference_type;
250  typedef char* pointer;
251  typedef char const* const_pointer;
252  typedef char& reference;
253  typedef char const& const_reference;
254 
255  // Allocate but don't initialize num characters.
256 #if LIBCWD_THREAD_SAFE
257  pointer allocate(size_type num, TSD_st&);
258 #else
259  pointer allocate(size_type num);
260 #endif
261 
262  // Deallocate storage p of deleted elements.
263 #if LIBCWD_THREAD_SAFE
264  void deallocate(pointer p, size_type num, TSD_st&);
265 #else
266  void deallocate(pointer p, size_type num);
267 #endif
268 
269  template <bool needs_lock1, int pool_instance1,
270  bool needs_lock2, int pool_instance2>
271  friend inline
272  bool operator==(CharPoolAlloc<needs_lock1, pool_instance1> const&,
273  CharPoolAlloc<needs_lock2, pool_instance2> const&);
274  template <bool needs_lock1, int pool_instance1,
275  bool needs_lock2, int pool_instance2>
276  friend inline
277  bool operator!=(CharPoolAlloc<needs_lock1, pool_instance1> const&,
278  CharPoolAlloc<needs_lock2, pool_instance2> const&);
279 
280  size_type max_size(void) const { return std::numeric_limits<size_type>::max(); }
281  };
282 #endif // gcc 4.0 and higher.
283 
284 // Convenience macros.
285 #if CWDEBUG_DEBUG
286 #define LIBCWD_COMMA_INT_INSTANCE , int instance
287 #define LIBCWD_COMMA_INSTANCE , instance
288 #define LIBCWD_DEBUGDEBUG_COMMA(x) , x
289 #else
290 #define LIBCWD_COMMA_INT_INSTANCE
291 #define LIBCWD_COMMA_INSTANCE
292 #define LIBCWD_DEBUGDEBUG_COMMA(x)
293 #endif
294 
295 enum pool_nt {
296  userspace_pool,
297  internal_pool,
298  auto_internal_pool
299 };
300 
301 // This wrapper adds sanity checks to the allocator use (like testing that
302 // 'internal' allocators are indeed only used while in internal mode, and that
303 // critical-area allocators are only used while the related lock is indeed
304 // locked, etc.).
305 template<typename T, class CharAlloc, pool_nt internal LIBCWD_COMMA_INT_INSTANCE>
306  class allocator_adaptor {
307  private:
308  // The underlying allocator.
309  CharAlloc M_char_allocator;
310 
311  public:
312  // Type definitions.
313  typedef T value_type;
314  typedef size_t size_type;
315  typedef ptrdiff_t difference_type;
316  typedef T* pointer;
317  typedef T const* const_pointer;
318  typedef T& reference;
319  typedef T const& const_reference;
320 
321  // Rebind allocator to type U.
322  template <class U>
323  struct rebind {
324  typedef allocator_adaptor<U, CharAlloc, internal LIBCWD_COMMA_INSTANCE> other;
325  };
326 
327  // Return address of values.
328  pointer address(reference value) const { return &value; }
329  const_pointer address(const_reference value) const { return &value; }
330 
331  // Constructors and destructor.
332  allocator_adaptor(void) throw() { }
333  allocator_adaptor(allocator_adaptor const& a) : M_char_allocator(a.M_char_allocator) { }
334  template<class U>
335  allocator_adaptor(allocator_adaptor<U, CharAlloc, internal LIBCWD_COMMA_INSTANCE> const& a) :
336  M_char_allocator(a.M_char_allocator) { }
337  template<class T2, class CharAlloc2, pool_nt internal2 LIBCWD_DEBUGDEBUG_COMMA(int instance2)>
338  friend class allocator_adaptor;
339  ~allocator_adaptor() throw() { }
340 
341  // Return maximum number of elements that can be allocated.
342  size_type max_size(void) const { return M_char_allocator.max_size() / sizeof(T); }
343 
344  // Allocate but don't initialize num elements of type T.
345  pointer allocate(size_type num);
346  pointer allocate(size_type num, void const* hint);
347 
348  // Deallocate storage p of deleted elements.
349  void deallocate(pointer p, size_type num);
350 
351  // Initialize elements of allocated storage p with value value.
352  void construct(pointer p, T const& value) { new ((void*)p) T(value); }
353 
354  // Destroy elements of initialized storage p.
355  void destroy(pointer p) { p->~T(); }
356 
357 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGM
358  private:
359  static void sanity_check(void);
360 #endif
361 
362  template <class T1, class CharAlloc1, pool_nt internal1 LIBCWD_DEBUGDEBUG_COMMA(int inst1),
363  class T2, class CharAlloc2, pool_nt internal2 LIBCWD_DEBUGDEBUG_COMMA(int inst2)>
364  friend inline
365  bool operator==(allocator_adaptor<T1, CharAlloc1, internal1 LIBCWD_DEBUGDEBUG_COMMA(inst1)> const& a1,
366  allocator_adaptor<T2, CharAlloc2, internal2 LIBCWD_DEBUGDEBUG_COMMA(inst2)> const& a2);
367  template <class T1, class CharAlloc1, pool_nt internal1 LIBCWD_DEBUGDEBUG_COMMA(int inst1),
368  class T2, class CharAlloc2, pool_nt internal2 LIBCWD_DEBUGDEBUG_COMMA(int inst2)>
369  friend inline
370  bool operator!=(allocator_adaptor<T1, CharAlloc1, internal1 LIBCWD_DEBUGDEBUG_COMMA(inst1)> const& a1,
371  allocator_adaptor<T2, CharAlloc2, internal2 LIBCWD_DEBUGDEBUG_COMMA(inst2)> const& a2);
372  };
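// [Editor's sketch of the adaptor's role in the classic C++03 allocator protocol; the variable
//  names are hypothetical and 'Alloc' stands for any instantiation of allocator_adaptor.  An
//  STL container performs roughly these four steps through it:
//
//    Alloc a;
//    Alloc::pointer p = a.allocate(1);   // raw storage, sanity-checked by the adaptor
//    a.construct(p, value);              // placement new of a T
//    a.destroy(p);                       // explicit destructor call
//    a.deallocate(p, 1);                 // storage returned to the underlying CharAlloc
//  ]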
373 
374 #if LIBCWD_THREAD_SAFE
375 // Normally we would be able to use the default allocator, but libcwd functions can
376 // be called at any time from malloc, which in turn might have been called from
377 // std::allocator while its lock is set. Therefore, in the threaded case, we also
378 // use a separate allocator pool for userspace.
379 #define LIBCWD_CHARALLOCATOR_USERSPACE(instance) ::libcwd::_private_:: \
380  allocator_adaptor<char, \
381  CharPoolAlloc<true, userspace_instance>, \
382  userspace_pool \
383  LIBCWD_DEBUGDEBUG_COMMA(::libcwd::_private_::instance)>
384 #endif
385 
386 // Both multi_threaded_internal_instance and memblk_map_instance also use a lock for
387 // the allocator pool itself, because those memory pools are shared between threads
388 // from within critical areas that use different mutexes.
389 // Other instances (> 0) are supposed to use their allocator instance only from within
390 // the critical area of the corresponding mutex_tct<instance>, and thus from only one
391 // thread at a time.
392 #if LIBCWD_THREAD_SAFE
393 #define LIBCWD_ALLOCATOR_POOL_NEEDS_LOCK(instance) \
394  ::libcwd::_private_::instance == \
395  ::libcwd::_private_::multi_threaded_internal_instance || \
396  ::libcwd::_private_::instance == \
397  ::libcwd::_private_::memblk_map_instance
398 #else // !LIBCWD_THREAD_SAFE
399 #define LIBCWD_ALLOCATOR_POOL_NEEDS_LOCK(instance) false
400 #endif // !LIBCWD_THREAD_SAFE
401 
402 #define LIBCWD_CHARALLOCATOR_INTERNAL(instance) ::libcwd::_private_:: \
403  allocator_adaptor<char, \
404  CharPoolAlloc<LIBCWD_ALLOCATOR_POOL_NEEDS_LOCK(instance), \
405  ::libcwd::_private_::instance >, \
406  internal_pool \
407  LIBCWD_DEBUGDEBUG_COMMA(::libcwd::_private_::instance)>
408 
409 #define LIBCWD_CHARALLOCATOR_AUTO_INTERNAL(instance) ::libcwd::_private_:: \
410  allocator_adaptor<char, \
411  CharPoolAlloc<LIBCWD_ALLOCATOR_POOL_NEEDS_LOCK(instance), \
412  ::libcwd::_private_::instance >, \
413  auto_internal_pool \
414  LIBCWD_DEBUGDEBUG_COMMA(::libcwd::_private_::instance)>
415 
416 #if LIBCWD_THREAD_SAFE
417 // Our allocator adaptor for the Non-Shared internal cases: Single Threaded
418 // (inst = single_threaded_internal_instance) or inside the critical area of the corresponding
419 // libcwd mutex instance.
420 #define LIBCWD_NS_INTERNAL_ALLOCATOR(instance) LIBCWD_CHARALLOCATOR_INTERNAL(instance)
421 #else // !LIBCWD_THREAD_SAFE
422 // In a single threaded application, the Non-Shared case is equivalent to the Single Threaded case.
423 #define LIBCWD_NS_INTERNAL_ALLOCATOR(instance) LIBCWD_CHARALLOCATOR_INTERNAL(single_threaded_internal_instance)
424 #endif // !LIBCWD_THREAD_SAFE
425 
426 #if LIBCWD_THREAD_SAFE
427 // In the multi-threaded case, LIBCWD_MT_*_ALLOCATOR uses a different allocator than the
428 // normal default allocator of libstdc++, because that allocator's mutex might already be
429 // locked, which would result in a deadlock if we tried to use it again here.
430 #define LIBCWD_MT_USERSPACE_ALLOCATOR LIBCWD_CHARALLOCATOR_USERSPACE(userspace_instance)
431 #define LIBCWD_MT_INTERNAL_ALLOCATOR LIBCWD_CHARALLOCATOR_INTERNAL(multi_threaded_internal_instance)
432 #define LIBCWD_MT_AUTO_INTERNAL_ALLOCATOR LIBCWD_CHARALLOCATOR_AUTO_INTERNAL(multi_threaded_internal_instance)
433 #else // !LIBCWD_THREAD_SAFE
434 // LIBCWD_MT_*_ALLOCATOR uses the normal default allocator of libstdc++-v3 (alloc), which
435 // does its own locking. The userspace allocator shares its memory pool with everything else
436 // that uses this allocator, which is most of the (userspace) STL.
437 #define LIBCWD_MT_USERSPACE_ALLOCATOR std::allocator<char>
438 #define LIBCWD_MT_INTERNAL_ALLOCATOR LIBCWD_CHARALLOCATOR_INTERNAL(single_threaded_internal_instance)
439 #define LIBCWD_MT_AUTO_INTERNAL_ALLOCATOR LIBCWD_CHARALLOCATOR_AUTO_INTERNAL(single_threaded_internal_instance)
440 #endif // !LIBCWD_THREAD_SAFE
441 
442 //---------------------------------------------------------------------------------------------------
443 // Internal allocator types.
444 
445 // This allocator is used in critical areas that are already locked by memblk_map_instance.
446 typedef LIBCWD_NS_INTERNAL_ALLOCATOR(memblk_map_instance) memblk_map_allocator;
447 
448 // This allocator is used in critical areas that are already locked by object_files_instance.
449 typedef LIBCWD_NS_INTERNAL_ALLOCATOR(object_files_instance) object_files_allocator;
450 
451 // This general allocator can be used outside libcwd-specific critical areas,
452 // but inside a set_alloc_checking_off() .. set_alloc_checking_on() pair.
453 typedef LIBCWD_MT_INTERNAL_ALLOCATOR internal_allocator;
454 
455 // This general allocator can be used outside libcwd-specific critical areas,
456 // in "user space", but doing so will cause internal memory to be allocated.
457 typedef LIBCWD_MT_AUTO_INTERNAL_ALLOCATOR auto_internal_allocator;
458 
459 //---------------------------------------------------------------------------------------------------
460 // User space allocator type.
461 
462 // This general allocator can be used outside libcwd-specific critical areas.
463 typedef LIBCWD_MT_USERSPACE_ALLOCATOR userspace_allocator;
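// [Editor's usage sketch, not part of the original header: all of the typedefs above are char
//  allocators; containers rebind them to their own value type.  For example (hypothetical
//  typedef name; <vector> is not included by this header), an internal vector of ints would be
//
//    typedef std::vector<int, internal_allocator::rebind<int>::other> example_internal_vector_ct;
//
//  and, per the comments above, allocations through it must happen inside a
//  set_alloc_checking_off() .. set_alloc_checking_on() pair (or from libcwd internal code).]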
464 
465  } // namespace _private_
466 } // namespace libcwd
467 
468 #endif // CWDEBUG_ALLOC
469 #endif // LIBCWD_PRIVATE_ALLOCATOR_H
470 