// static_mem_pool.h: declaration and implementation of the static
// memory pool.  (Doxygen site-navigation text removed from this
// page dump; what follows is the source proper.)
// -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
// vim:tabstop=4:shiftwidth=4:expandtab:

/*
 * Copyright (C) 2004-2007 Wu Yongwei <adah at users dot sourceforge dot net>
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any
 * damages arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute
 * it freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must
 *    not claim that you wrote the original software.  If you use this
 *    software in a product, an acknowledgement in the product
 *    documentation would be appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must
 *    not be misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source
 *    distribution.
 *
 * This file is part of Stones of Nvwa:
 * http://sourceforge.net/projects/nvwa
 *
 */
#ifndef _STATIC_MEM_POOL_H
#define _STATIC_MEM_POOL_H

#include <new>
#include <stdexcept>
#include <string>
#include <vector>
#include <assert.h>
#include <stddef.h>
#include "class_level_lock.h"
#include "mem_pool_base.h"
/* Defines the work-around for Microsoft Visual C++ 6.0 and Borland
 * C++ 5.5.1: they need destructors of private nested classes to be
 * accessible, so __PRIVATE expands to `public' there. */
# if (defined(_MSC_VER) && _MSC_VER < 1300) \
        || (defined(__BORLANDC__) && __BORLANDC__ < 0x600)
#   define __PRIVATE public
# else
#   define __PRIVATE private
# endif
/* Defines the macro for debugging output.  The _Lck argument controls
 * whether the static_mem_pool_set lock is taken around the output (it
 * must be false when the caller already holds that lock). */
# ifdef _STATIC_MEM_POOL_DEBUG
#   include <iostream>
#   define _STATIC_MEM_POOL_TRACE(_Lck, _Msg) \
        { \
            if (_Lck) { \
                static_mem_pool_set::lock __guard; \
                std::cerr << "static_mem_pool: " << _Msg << std::endl; \
            } else { \
                std::cerr << "static_mem_pool: " << _Msg << std::endl; \
            } \
        }
# else
#   define _STATIC_MEM_POOL_TRACE(_Lck, _Msg) \
        ((void)0)
# endif
81 {
82 public:
83  typedef class_level_lock<static_mem_pool_set>::lock lock;
84  static static_mem_pool_set& instance();
85  void recycle();
86  void add(mem_pool_base* __memory_pool_p);
87 
88 __PRIVATE:
90 private:
92 
93  typedef std::vector<mem_pool_base*> container_type;
94  container_type _M_memory_pool_set;
95 
96  /* Forbid their use */
98  const static_mem_pool_set& operator=(const static_mem_pool_set&);
99 };
100 
111 template <size_t _Sz, int _Gid = -1>
112 class static_mem_pool : public mem_pool_base
113 {
114  typedef typename class_level_lock<static_mem_pool<_Sz, _Gid>, (_Gid < 0)>
115  ::lock lock;
116 public:
126  {
127  lock __guard;
128  if (!_S_instance_p)
129  {
130  _S_instance_p = _S_create_instance();
131  }
132  return *_S_instance_p;
133  }
142  {
143  assert(_S_instance_p != NULL);
144  return *_S_instance_p;
145  }
154  void* allocate()
155  {
156  {
157  lock __guard;
158  if (_S_memory_block_p)
159  {
160  void* __result = _S_memory_block_p;
161  _S_memory_block_p = _S_memory_block_p->_M_next;
162  return __result;
163  }
164  }
165  return _S_alloc_sys(_S_align(_Sz));
166  }
172  void deallocate(void* __ptr)
173  {
174  assert(__ptr != NULL);
175  lock __guard;
176  _Block_list* __block = reinterpret_cast<_Block_list*>(__ptr);
177  __block->_M_next = _S_memory_block_p;
178  _S_memory_block_p = __block;
179  }
180  virtual void recycle();
181 
182 private:
184  {
185  _STATIC_MEM_POOL_TRACE(true, "static_mem_pool<" << _Sz << ','
186  << _Gid << "> is created");
187  }
188  ~static_mem_pool()
189  {
190 # ifdef _DEBUG
191  // Empty the pool to avoid false memory leakage alarms. This is
192  // generally not necessary for release binaries.
193  _Block_list* __block = _S_memory_block_p;
194  while (__block)
195  {
196  _Block_list* __next = __block->_M_next;
197  dealloc_sys(__block);
198  __block = __next;
199  }
200  _S_memory_block_p = NULL;
201 # endif
202  _S_instance_p = NULL;
203  _S_destroyed = true;
204  _STATIC_MEM_POOL_TRACE(false, "static_mem_pool<" << _Sz << ','
205  << _Gid << "> is destroyed");
206  }
207  static size_t _S_align(size_t __size)
208  {
209  return __size >= sizeof(_Block_list) ? __size : sizeof(_Block_list);
210  }
211  static void* _S_alloc_sys(size_t __size);
212  static static_mem_pool* _S_create_instance();
213 
214  static bool _S_destroyed;
215  static static_mem_pool* _S_instance_p;
216  static mem_pool_base::_Block_list* _S_memory_block_p;
217 
218  /* Forbid their use */
220  const static_mem_pool& operator=(const static_mem_pool&);
221 };
222 
223 template <size_t _Sz, int _Gid> bool
225 template <size_t _Sz, int _Gid> mem_pool_base::_Block_list*
227 template <size_t _Sz, int _Gid> static_mem_pool<_Sz, _Gid>*
228  static_mem_pool<_Sz, _Gid>::_S_instance_p = _S_create_instance();
229 
235 template <size_t _Sz, int _Gid>
237 {
238  // Only here the global lock in static_mem_pool_set is obtained
239  // before the pool-specific lock. However, no race conditions are
240  // found so far.
241  lock __guard;
242  _Block_list* __block = _S_memory_block_p;
243  while (__block)
244  {
245  if (_Block_list* __temp = __block->_M_next)
246  {
247  _Block_list* __next = __temp->_M_next;
248  __block->_M_next = __next;
249  dealloc_sys(__temp);
250  __block = __next;
251  }
252  else
253  {
254  break;
255  }
256  }
257  _STATIC_MEM_POOL_TRACE(false, "static_mem_pool<" << _Sz << ','
258  << _Gid << "> is recycled");
259 }
260 
261 template <size_t _Sz, int _Gid>
262 void* static_mem_pool<_Sz, _Gid>::_S_alloc_sys(size_t __size)
263 {
264  static_mem_pool_set::lock __guard;
265  void* __result = mem_pool_base::alloc_sys(__size);
266  if (!__result)
267  {
268  static_mem_pool_set::instance().recycle();
269  __result = mem_pool_base::alloc_sys(__size);
270  }
271  return __result;
272 }
273 
274 template <size_t _Sz, int _Gid>
276 {
277  if (_S_destroyed)
278  throw std::runtime_error("dead reference detected");
279 
280  static_mem_pool_set::instance(); // Force its creation
281  static_mem_pool* __inst_p = new static_mem_pool();
282  try
283  {
284  static_mem_pool_set::instance().add(__inst_p);
285  }
286  catch (...)
287  {
288  _STATIC_MEM_POOL_TRACE(true,
289  "Exception occurs in static_mem_pool_set::add");
290  // The strange cast below is to work around a bug in GCC 2.95.3
291  delete static_cast<mem_pool_base*>(__inst_p);
292  throw;
293  }
294  return __inst_p;
295 }
296 
/**
 * Declares class-specific operator new/operator delete that allocate
 * from the static memory pool, throwing std::bad_alloc on failure.
 *
 * @param _Cls  class to use the static_mem_pool
 */
#define DECLARE_STATIC_MEM_POOL(_Cls) \
public: \
    static void* operator new(size_t __size) \
    { \
        assert(__size == sizeof(_Cls)); \
        void* __ptr; \
        __ptr = static_mem_pool<sizeof(_Cls)>:: \
                               instance_known().allocate(); \
        if (__ptr == NULL) \
            throw std::bad_alloc(); \
        return __ptr; \
    } \
    static void operator delete(void* __ptr) \
    { \
        if (__ptr) \
            static_mem_pool<sizeof(_Cls)>:: \
                           instance_known().deallocate(__ptr); \
    }

/**
 * Declares class-specific operator new/operator delete that allocate
 * from the static memory pool; operator new returns NULL instead of
 * throwing on failure.
 *
 * @param _Cls  class to use the static_mem_pool
 */
#define DECLARE_STATIC_MEM_POOL__NOTHROW(_Cls) \
public: \
    static void* operator new(size_t __size) throw() \
    { \
        assert(__size == sizeof(_Cls)); \
        return static_mem_pool<sizeof(_Cls)>:: \
                              instance_known().allocate(); \
    } \
    static void operator delete(void* __ptr) \
    { \
        if (__ptr) \
            static_mem_pool<sizeof(_Cls)>:: \
                           instance_known().deallocate(__ptr); \
    }

/**
 * Declares class-specific operator new/operator delete that allocate
 * from a grouped static memory pool, throwing std::bad_alloc on
 * failure.
 *
 * @param _Cls  class to use the static_mem_pool
 * @param _Gid  group ID of the pool to share
 */
#define DECLARE_STATIC_MEM_POOL_GROUPED(_Cls, _Gid) \
public: \
    static void* operator new(size_t __size) \
    { \
        assert(__size == sizeof(_Cls)); \
        void* __ptr; \
        __ptr = static_mem_pool<sizeof(_Cls), (_Gid)>:: \
                               instance_known().allocate(); \
        if (__ptr == NULL) \
            throw std::bad_alloc(); \
        return __ptr; \
    } \
    static void operator delete(void* __ptr) \
    { \
        if (__ptr) \
            static_mem_pool<sizeof(_Cls), (_Gid)>:: \
                           instance_known().deallocate(__ptr); \
    }

/**
 * Declares class-specific operator new/operator delete that allocate
 * from a grouped static memory pool; operator new returns NULL instead
 * of throwing on failure.
 *
 * @param _Cls  class to use the static_mem_pool
 * @param _Gid  group ID of the pool to share
 */
#define DECLARE_STATIC_MEM_POOL_GROUPED__NOTHROW(_Cls, _Gid) \
public: \
    static void* operator new(size_t __size) throw() \
    { \
        assert(__size == sizeof(_Cls)); \
        return static_mem_pool<sizeof(_Cls), (_Gid)>:: \
                              instance_known().allocate(); \
    } \
    static void operator delete(void* __ptr) \
    { \
        if (__ptr) \
            static_mem_pool<sizeof(_Cls), (_Gid)>:: \
                           instance_known().deallocate(__ptr); \
    }

// OBSOLETE: no longer needed (pool creation is now forced by the
// static initializer of _S_instance_p); kept only so old client code
// still compiles, with a runtime warning.
#define PREPARE_STATIC_MEM_POOL(_Cls) \
    std::cerr << "PREPARE_STATIC_MEM_POOL is obsolete!\n";

// OBSOLETE: no longer needed
#define PREPARE_STATIC_MEM_POOL_GROUPED(_Cls, _Gid) \
    std::cerr << "PREPARE_STATIC_MEM_POOL_GROUPED is obsolete!\n";

#undef __PRIVATE

#endif // _STATIC_MEM_POOL_H
// NOTE(review): trailing Doxygen cross-reference tooltips removed —
// they were HTML page residue, not part of the source file.