mt_allocator.h

Go to the documentation of this file.
00001 // MT-optimized allocator -*- C++ -*-
00002 
00003 // Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 2, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // You should have received a copy of the GNU General Public License along
00017 // with this library; see the file COPYING.  If not, write to the Free
00018 // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
00019 // USA.
00020 
00021 // As a special exception, you may use this file as part of a free software
00022 // library without restriction.  Specifically, if other files instantiate
00023 // templates or use macros or inline functions from this file, or you compile
00024 // this file and link it with other files to produce an executable, this
00025 // file does not by itself cause the resulting executable to be covered by
00026 // the GNU General Public License.  This exception does not however
00027 // invalidate any other reasons why the executable file might be covered by
00028 // the GNU General Public License.
00029 
00030 /** @file ext/mt_allocator.h
00031  *  This file is a GNU extension to the Standard C++ Library.
00032  */
00033 
00034 #ifndef _MT_ALLOCATOR_H
00035 #define _MT_ALLOCATOR_H 1
00036 
00037 #include <new>
00038 #include <cstdlib>
00039 #include <bits/functexcept.h>
00040 #include <ext/atomicity.h>
00041 #include <bits/stl_move.h>
00042 
00043 _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
00044 
00045   using std::size_t;
00046   using std::ptrdiff_t;
00047 
00048   typedef void (*__destroy_handler)(void*);
00049 
00050   /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t    _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw new/
      // call will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t    _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t    _M_min_bin;

      // In order to avoid fragmenting and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t    _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534)
      size_t    _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t    _M_freelist_headroom;

      // Set to true forces all allocations to use new().
      bool      _M_force_new;

      // Default constructor: uses the _S_* compile-time constants
      // above, and honors the GLIBCXX_FORCE_NEW environment variable
      // (checked here, once, at construction).
      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      // Fully parameterized constructor.  No validation is performed;
      // callers are responsible for respecting the NB constraints
      // documented on the members above.
      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };

    // Node in a singly-linked list recording the initial address of
    // each allocated chunk, so the memory can be found again later.
    struct _Block_address
    {
      void*             _M_initial;
      _Block_address*   _M_next;
    };

    // Read-only access to the current configuration.
    const _Tune&
    _M_get_options() const
    { return _M_options; }

    // Replace the configuration.  Silently ignored once _M_init is
    // true, i.e. after initialization has completed.
    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    // True when a request for __bytes should bypass the pool and use
    // operator new directly: the request is larger than _M_max_bytes,
    // or GLIBCXX_FORCE_NEW was set (_M_force_new).
    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    // Map a request size to its bin index.
    // NB: no bounds check; callers must first rule out sizes above
    // _M_max_bytes via _M_check_threshold.
    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    // Not copyable: declared private and intentionally not defined.
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune               _M_options;

    // Size-to-bin lookup table; NULL until initialization (filled by
    // the derived pools' _M_initialize — not visible in this header).
    _Binmap_type*       _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool                _M_init;
  };
00185 
00186 
00187   /**
00188    *  @brief  Data describing the underlying memory pool, parameterized on
00189    *  threading support.
00190    */
  // Primary template is only declared; the <false> specialization and
  // (when __GTHREADS is defined) the <true> specialization follow.
  template<bool _Thread>
    class __pool;
00193 
00194   /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      // A free block is overlaid with the link to the next free block.
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*          _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record**         _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*         _M_address;
      };

      // Perform the one-time setup (_M_initialize) on first call only.
      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      // Release the pool's memory; defined out of line.
      void
      _M_destroy() throw();

      // Obtain a block for a request of __bytes when the freelist is
      // empty (see __mt_alloc::allocate); defined out of line.
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return the block at __p to the pool; defined out of line.
      void
      _M_reclaim_block(char* __p, size_t __bytes);

      // Single-threaded pool: everything lives under "thread" id 0.
      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // No per-thread accounting to maintain in this specialization.
      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*              _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      void
      _M_initialize();
    };
00259  
#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // stores the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // (i.e. the thread dies) is called, we return the thread id to
      // the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record*                 _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                          _M_id;
      };

      // A block is either on a freelist (then _M_next is the live
      // member) or handed out to a thread (then _M_thread_id is).
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*                  _M_next;

        // The thread id of the thread which has requested this block.
        size_t                          _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record**                 _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*                 _M_address;

        // An "array" of counters used to keep track of the amount of
        // blocks that are on the freelist/used for each thread id.
        // - Note that the second part of the allocated _M_used "array"
        //   actually hosts (atomic) counters of reclaimed blocks:  in
        //   _M_reserve_block and in _M_reclaim_block those numbers are
        //   subtracted from the first ones to obtain the actual size
        //   of the "working set" of the given thread.
        // - Memory to these "arrays" is allocated in _S_initialize()
        //   for _S_max_threads + global pool 0.
        size_t*                         _M_free;
        size_t*                         _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block.  The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      // Retained for ABI compatibility; see the parameterless
      // _M_initialize() below.
      void
      _M_initialize(__destroy_handler);

      // Perform the one-time setup (_M_initialize) on first call only.
      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      // Release the pool's memory; defined out of line.
      void
      _M_destroy() throw();

      // Obtain a block for a request of __bytes when the freelist is
      // empty (see __mt_alloc::allocate); defined out of line.
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return the block at __p to the pool; defined out of line.
      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // Record that __block now belongs to __thread_id and update the
      // per-thread free/used counters.  Skipped entirely when threads
      // are not actually active.
      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      // XXX GLIBCXX_ABI Deprecated
      // Retained for ABI compatibility.
      void
      _M_destroy_thread_key(void*);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*              _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      // Head of the free thread-id list, and (presumably, by analogy
      // with _Block_address::_M_initial) the initial address of the
      // storage backing the id records — TODO confirm in the .cc file.
      _Thread_record*           _M_thread_freelist;
      void*                     _M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif
00389 
  // Helper holding one pool object shared by every allocator that uses
  // the same _PoolTp/_Thread combination.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread>      pool_type;

      // Function-local static: the shared pool is constructed on the
      // first call and returned by reference thereafter.
      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }
    };
00402 
  // Declared here; defined only for the <.., false> case and (when
  // __GTHREADS is defined) the <.., true> case below.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;
00405 
00406   template<template <bool> class _PoolTp>
00407     struct __common_pool_base<_PoolTp, false> 
00408     : public __common_pool<_PoolTp, false>
00409     {
00410       using  __common_pool<_PoolTp, false>::_S_get_pool;
00411 
00412       static void
00413       _S_initialize_once()
00414       {
00415     static bool __init;
00416     if (__builtin_expect(__init == false, false))
00417       {
00418         _S_get_pool()._M_initialize_once(); 
00419         __init = true;
00420       }
00421       }
00422     };
00423 
#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      // Plain function usable as the __gthread_once callback below.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      // One-time initialization: serialized through __gthread_once
      // when threads are active, with an unconditional fallback call
      // afterwards (see comment below).
      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif
00457 
00458   /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      // Rebinding to another value_type keeps the same shared pool:
      // _Tp1 is deliberately ignored in the resulting policy type.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
           bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
  };
00470  
00471 
  // Helper holding a distinct pool object per value_type, tuned to
  // that type's size and alignment.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp           value_type;
      typedef _PoolTp<_Thread>      pool_type;

      static pool_type&
      _S_get_pool()
      {
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        // Alignment: at least __alignof__(_Tp), but never smaller than
        // a _Block_record (free blocks are overlaid with list links).
        const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                       ? __alignof__(_Tp) : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        // Function-local statics: the _Tune and the pool itself are
        // constructed once, on first use.  The max-bytes threshold,
        // minimum bin and chunk size all scale with sizeof(_Tp); see
        // _Tune's members for the meaning of each field.
        static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                     _Tune::_S_max_threads,
                     _Tune::_S_freelist_headroom,
                     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }
    };
00497 
  // Declared here; defined only for the <.., .., false> case and (when
  // __GTHREADS is defined) the <.., .., true> case below.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;
00500 
00501   template<typename _Tp, template <bool> class _PoolTp>
00502     struct __per_type_pool_base<_Tp, _PoolTp, false> 
00503     : public __per_type_pool<_Tp, _PoolTp, false> 
00504     {
00505       using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;
00506 
00507       static void
00508       _S_initialize_once()
00509       {
00510     static bool __init;
00511     if (__builtin_expect(__init == false, false))
00512       {
00513         _S_get_pool()._M_initialize_once(); 
00514         __init = true;
00515       }
00516       }
00517     };
00518 
#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      // Plain function usable as the __gthread_once callback below.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      // One-time initialization: serialized through __gthread_once
      // when threads are active, with an unconditional fallback call
      // afterwards (see comment below).
      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif
00552 
00553   /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      // Rebinding to _Tp1 selects that type's own pool instance (the
      // value_type participates in the resulting policy type).
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
           bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
  };
00566 
00567 
00568   /// Base class for _Tp dependent member functions.
00569   template<typename _Tp>
00570     class __mt_alloc_base 
00571     {
00572     public:
00573       typedef size_t                    size_type;
00574       typedef ptrdiff_t                 difference_type;
00575       typedef _Tp*                      pointer;
00576       typedef const _Tp*                const_pointer;
00577       typedef _Tp&                      reference;
00578       typedef const _Tp&                const_reference;
00579       typedef _Tp                       value_type;
00580 
00581       pointer
00582       address(reference __x) const
00583       { return &__x; }
00584 
00585       const_pointer
00586       address(const_reference __x) const
00587       { return &__x; }
00588 
00589       size_type
00590       max_size() const throw() 
00591       { return size_t(-1) / sizeof(_Tp); }
00592 
00593       // _GLIBCXX_RESOLVE_LIB_DEFECTS
00594       // 402. wrong new expression in [some_] allocator::construct
00595       void 
00596       construct(pointer __p, const _Tp& __val) 
00597       { ::new((void *)__p) _Tp(__val); }
00598 
00599 #ifdef __GXX_EXPERIMENTAL_CXX0X__
00600       template<typename... _Args>
00601         void
00602         construct(pointer __p, _Args&&... __args)
00603     { ::new((void *)__p) _Tp(std::forward<_Args>(__args)...); }
00604 #endif
00605 
00606       void 
00607       destroy(pointer __p) { __p->~_Tp(); }
00608     };
00609 
00610 #ifdef __GTHREADS
00611 #define __thread_default true
00612 #else
00613 #define __thread_default false
00614 #endif
00615 
00616   /**
00617    *  @brief  This is a fixed size (power of 2) allocator which - when
00618    *  compiled with thread support - will maintain one freelist per
00619    *  size per thread plus a "global" one. Steps are taken to limit
00620    *  the per thread freelist sizes (by returning excess back to
00621    *  the "global" list).
00622    *
00623    *  Further details:
00624    *  http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt12ch32.html
00625    */
  template<typename _Tp,
       typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                        size_type;
      typedef ptrdiff_t                     difference_type;
      typedef _Tp*                          pointer;
      typedef const _Tp*                    const_pointer;
      typedef _Tp&                          reference;
      typedef const _Tp&                    const_reference;
      typedef _Tp                           value_type;
      typedef _Poolp                __policy_type;
      typedef typename _Poolp::pool_type    __pool_type;

      // Standard allocator rebind, also rebinding the pool policy to
      // the new value_type.
      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
      typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
      typedef __mt_alloc<_Tp1, pol_type> other;
    };

      // The allocator itself is stateless; all state lives in the
      // policy's static pool, so construction and copying are no-ops.
      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      // Defined after the class; see the out-of-line definitions below.
      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
    // Return a copy, not a reference, for external consumption.
    return __policy_type::_S_get_pool()._M_get_options();
      }

      // Forwarded to the pool; ignored once the pool is initialized
      // (see __pool_base::_M_set_options).
      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
00674 
00675   template<typename _Tp, typename _Poolp>
00676     typename __mt_alloc<_Tp, _Poolp>::pointer
00677     __mt_alloc<_Tp, _Poolp>::
00678     allocate(size_type __n, const void*)
00679     {
00680       if (__builtin_expect(__n > this->max_size(), false))
00681     std::__throw_bad_alloc();
00682 
00683       __policy_type::_S_initialize_once();
00684 
00685       // Requests larger than _M_max_bytes are handled by operator
00686       // new/delete directly.
00687       __pool_type& __pool = __policy_type::_S_get_pool();
00688       const size_t __bytes = __n * sizeof(_Tp);
00689       if (__pool._M_check_threshold(__bytes))
00690     {
00691       void* __ret = ::operator new(__bytes);
00692       return static_cast<_Tp*>(__ret);
00693     }
00694       
00695       // Round up to power of 2 and figure out which bin to use.
00696       const size_t __which = __pool._M_get_binmap(__bytes);
00697       const size_t __thread_id = __pool._M_get_thread_id();
00698       
00699       // Find out if we have blocks on our freelist.  If so, go ahead
00700       // and use them directly without having to lock anything.
00701       char* __c;
00702       typedef typename __pool_type::_Bin_record _Bin_record;
00703       const _Bin_record& __bin = __pool._M_get_bin(__which);
00704       if (__bin._M_first[__thread_id])
00705     {
00706       // Already reserved.
00707       typedef typename __pool_type::_Block_record _Block_record;
00708       _Block_record* __block = __bin._M_first[__thread_id];
00709       __bin._M_first[__thread_id] = __block->_M_next;
00710       
00711       __pool._M_adjust_freelist(__bin, __block, __thread_id);
00712       __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
00713     }
00714       else
00715     {
00716       // Null, reserve.
00717       __c = __pool._M_reserve_block(__bytes, __thread_id);
00718     }
00719       return static_cast<_Tp*>(static_cast<void*>(__c));
00720     }
00721   
00722   template<typename _Tp, typename _Poolp>
00723     void
00724     __mt_alloc<_Tp, _Poolp>::
00725     deallocate(pointer __p, size_type __n)
00726     {
00727       if (__builtin_expect(__p != 0, true))
00728     {
00729       // Requests larger than _M_max_bytes are handled by
00730       // operators new/delete directly.
00731       __pool_type& __pool = __policy_type::_S_get_pool();
00732       const size_t __bytes = __n * sizeof(_Tp);
00733       if (__pool._M_check_threshold(__bytes))
00734         ::operator delete(__p);
00735       else
00736         __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
00737     }
00738     }
00739   
  // __mt_alloc is stateless (no data members), so any two allocators
  // of the same instantiation always compare equal.
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
00749 
00750 #undef __thread_default
00751 
00752 _GLIBCXX_END_NAMESPACE
00753 
00754 #endif

Generated on Wed Mar 26 00:43:02 2008 for libstdc++ by  doxygen 1.5.1