
source: downloads/boost_1_34_1/boost/pool/pool.hpp @ 47

Last change on this file since 47 was 29, checked in by landauf, 16 years ago

updated boost from 1_33_1 to 1_34_1

File size: 17.1 KB
// Copyright (C) 2000, 2001 Stephen Cleary
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org for updates, documentation, and revision history.

#ifndef BOOST_POOL_HPP
#define BOOST_POOL_HPP

#include <boost/config.hpp>  // for workarounds

// std::less, std::less_equal, std::greater
#include <functional>
// new[], delete[], std::nothrow
#include <new>
// std::size_t, std::ptrdiff_t
#include <cstddef>
// std::malloc, std::free
#include <cstdlib>
// std::invalid_argument
#include <exception>
// std::max
#include <algorithm>

#include <boost/pool/poolfwd.hpp>

// boost::details::pool::ct_lcm
#include <boost/pool/detail/ct_gcd_lcm.hpp>
// boost::details::pool::lcm
#include <boost/pool/detail/gcd_lcm.hpp>
// boost::simple_segregated_storage
#include <boost/pool/simple_segregated_storage.hpp>

#ifdef BOOST_NO_STDC_NAMESPACE
 namespace std { using ::malloc; using ::free; }
#endif

// There are a few places in this file where the expression "this->m" is used.
// This expression is used to force instantiation-time name lookup, which I am
//   informed is required for strict Standard compliance.  It's only necessary
//   if "m" is a member of a base class that is dependent on a template
//   parameter.
// Thanks to Jens Maurer for pointing this out!
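//
// A minimal sketch of the issue (hypothetical names, not part of the original
//  file): with a base class that depends on a template parameter, an
//  unqualified member name is not looked up at instantiation time, so the
//  member must be reached through "this->":
//
//    template <typename T> struct base { int m; };
//    template <typename T> struct derived : base<T> {
//      int get() { return this->m; }  // a plain "m" here would not be found
//    };                               //  by strictly conforming compilers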

namespace boost {

struct default_user_allocator_new_delete
{
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  static char * malloc(const size_type bytes)
  { return new (std::nothrow) char[bytes]; }
  static void free(char * const block)
  { delete [] block; }
};

struct default_user_allocator_malloc_free
{
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  static char * malloc(const size_type bytes)
  { return reinterpret_cast<char *>(std::malloc(bytes)); }
  static void free(char * const block)
  { std::free(block); }
};

namespace details {

// PODptr is a class that pretends to be a "pointer" to different class types
//  that don't really exist.  It provides member functions to access the "data"
//  of the "object" it points to.  Since these "class" types are of variable
//  size, and each contains some information at the *end* of its memory (for
//  alignment reasons), PODptr must contain the size of this "class" as well as
//  the pointer to this "object".
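//
// Roughly, the memory that a PODptr describes is laid out as follows (field
//  positions follow the accessors below; the pointer field is padded out to
//  lcm(sizeof(size_type), sizeof(void *)) bytes for alignment):
//
//    [ chunks: element_size() bytes | next block pointer (+ padding) | next block size ]
//    ^ begin()                      ^ end() == ptr_next_ptr()        ^ ptr_next_size()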
template <typename SizeType>
class PODptr
{
  public:
    typedef SizeType size_type;

  private:
    char * ptr;
    size_type sz;

    char * ptr_next_size() const
    { return (ptr + sz - sizeof(size_type)); }
    char * ptr_next_ptr() const
    {
      return (ptr_next_size() -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

  public:
    PODptr(char * const nptr, const size_type nsize)
    :ptr(nptr), sz(nsize) { }
    PODptr()
    :ptr(0), sz(0) { }

    bool valid() const { return (begin() != 0); }
    void invalidate() { begin() = 0; }
    char * & begin() { return ptr; }
    char * begin() const { return ptr; }
    char * end() const { return ptr_next_ptr(); }
    size_type total_size() const { return sz; }
    size_type element_size() const
    {
      return (sz - sizeof(size_type) -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

    size_type & next_size() const
    { return *(reinterpret_cast<size_type *>(ptr_next_size())); }
    char * & next_ptr() const
    { return *(reinterpret_cast<char **>(ptr_next_ptr())); }

    PODptr next() const
    { return PODptr<size_type>(next_ptr(), next_size()); }
    void next(const PODptr & arg) const
    {
      next_ptr() = arg.begin();
      next_size() = arg.total_size();
    }
};

} // namespace details

template <typename UserAllocator>
class pool: protected simple_segregated_storage<
    typename UserAllocator::size_type>
{
  public:
    typedef UserAllocator user_allocator;
    typedef typename UserAllocator::size_type size_type;
    typedef typename UserAllocator::difference_type difference_type;

  private:
    BOOST_STATIC_CONSTANT(unsigned, min_alloc_size =
        (::boost::details::pool::ct_lcm<sizeof(void *), sizeof(size_type)>::value) );

    // Returns 0 if out-of-memory
    // Called if malloc/ordered_malloc needs to resize the free list
    void * malloc_need_resize();
    void * ordered_malloc_need_resize();

  protected:
    details::PODptr<size_type> list;

    simple_segregated_storage<size_type> & store() { return *this; }
    const simple_segregated_storage<size_type> & store() const { return *this; }
    const size_type requested_size;
    size_type next_size;

    // finds which POD in the list 'chunk' was allocated from
    details::PODptr<size_type> find_POD(void * const chunk) const;

    // is_from() tests a chunk to determine if it belongs in a block
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    {
      // We use std::less_equal and std::less to test 'chunk'
      //  against the array bounds because standard operators
      //  may return unspecified results.
      // This is to ensure portability.  The operators < <= > >= are only
      //  defined for pointers to objects that are 1) in the same array, or
      //  2) subobjects of the same object [5.9/2].
      // The functor objects guarantee a total order for any pointer [20.3.3/8]
//WAS:
//      return (std::less_equal<void *>()(static_cast<void *>(i), chunk)
//          && std::less<void *>()(chunk,
//              static_cast<void *>(i + sizeof_i)));
      std::less_equal<void *> lt_eq;
      std::less<void *> lt;
      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }

    size_type alloc_size() const
    {
      const unsigned min_size = min_alloc_size;
      return details::pool::lcm<size_type>(requested_size, min_size);
    }

    // for the sake of code readability :)
    static void * & nextof(void * const ptr)
    { return *(static_cast<void **>(ptr)); }

  public:
    // The second parameter here is an extension!
    // pre: nrequested_size != 0 && nnext_size != 0
    explicit pool(const size_type nrequested_size,
        const size_type nnext_size = 32)
    :list(0, 0), requested_size(nrequested_size), next_size(nnext_size)
    { }

    ~pool() { purge_memory(); }

    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    //  Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    //  Returns true if memory was actually deallocated
    bool purge_memory();

    // These functions are extensions!
    size_type get_next_size() const { return next_size; }
    void set_next_size(const size_type nnext_size) { next_size = nnext_size; }

    // Both malloc and ordered_malloc do a quick inlined check first for any
    //  free chunks.  Only if we need to get another memory block do we call
    //  the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return store().malloc();
      return malloc_need_resize();
    }

    void * ordered_malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return store().malloc();
      return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void free(void * const chunk)
    { store().free(chunk); }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void ordered_free(void * const chunk)
    { store().ordered_free(chunk); }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc(n).
    void free(void * const chunks, const size_type n)
    {
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().free_n(chunks, num_chunks, partition_size);
    }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    {
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().ordered_free_n(chunks, num_chunks, partition_size);
    }

    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    {
      return (find_POD(chunk).valid());
    }
};
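
// A usage sketch (illustrative only, not part of the original header), based
//  on the public interface above; boost::pool<> relies on the default
//  UserAllocator (default_user_allocator_new_delete) declared in poolfwd.hpp:
//
//    boost::pool<> p(sizeof(int));      // chunks of at least sizeof(int) bytes
//    void * a = p.ordered_malloc();     // one chunk, 0 if out-of-memory
//    void * b = p.ordered_malloc(4);    // 4 contiguous chunks, 0 if out-of-memory
//    if (b != 0)
//      p.ordered_free(b, 4);
//    if (a != 0)
//      p.ordered_free(a);
//    p.release_memory();                // frees blocks with no allocated chunks;
//                                       //  requires the ordered malloc/free variants
//    // ~pool() calls purge_memory(), which releases everything that remains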

template <typename UserAllocator>
bool pool<UserAllocator>::release_memory()
{
  // This is the return value: it will be set to true when we actually call
  //  UserAllocator::free(..)
  bool ret = false;

  // This is a current & previous iterator pair over the memory block list
  details::PODptr<size_type> ptr = list;
  details::PODptr<size_type> prev;

  // This is a current & previous iterator pair over the free memory chunk list
  //  Note that "prev_free" in this case does NOT point to the previous memory
  //  chunk in the free list, but rather the last free memory chunk before the
  //  current block.
  void * free = this->first;
  void * prev_free = 0;

  const size_type partition_size = alloc_size();

  // Search through all the allocated memory blocks
  while (ptr.valid())
  {
    // At this point:
    //  ptr points to a valid memory block
    //  free points to either:
    //    0 if there are no more free chunks
    //    the first free chunk in this or some next memory block
    //  prev_free points to either:
    //    the last free chunk in some previous memory block
    //    0 if there is no such free chunk
    //  prev is either:
    //    the PODptr whose next() is ptr
    //    !valid() if there is no such PODptr

    // If there are no more free memory chunks, then every remaining
    //  block is allocated out to its fullest capacity, and we can't
    //  release any more memory
    if (free == 0)
      return ret;

    // We have to check all the chunks.  If they are *all* free (i.e., present
    //  in the free list), then we can free the block.
    bool all_chunks_free = true;

    // Iterate 'i' through all chunks in the memory block
    // if free starts in the memory block, be careful to keep it there
    void * saved_free = free;
    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
    {
      // If this chunk is not free
      if (i != free)
      {
        // We won't be able to free this block
        all_chunks_free = false;

        // free might have travelled outside ptr
        free = saved_free;
        // Abort searching the chunks; we won't be able to free this
        //  block because a chunk is not free.
        break;
      }

      // We do not increment prev_free because we are in the same block
      free = nextof(free);
    }

    // post: if the memory block has any chunks, free points to one of them
    // otherwise, our assertions above are still valid

    const details::PODptr<size_type> next = ptr.next();

    if (!all_chunks_free)
    {
      if (is_from(free, ptr.begin(), ptr.element_size()))
      {
        std::less<void *> lt;
        void * const end = ptr.end();
        do
        {
          prev_free = free;
          free = nextof(free);
        } while (free && lt(free, end));
      }
      // This invariant is now restored:
      //     free points to the first free chunk in some next memory block, or
      //       0 if there is no such chunk.
      //     prev_free points to the last free chunk in this memory block.

      // We are just about to advance ptr.  Maintain the invariant:
      // prev is the PODptr whose next() is ptr, or !valid()
      // if there is no such PODptr
      prev = ptr;
    }
    else
    {
      // All chunks from this block are free

      // Remove block from list
      if (prev.valid())
        prev.next(next);
      else
        list = next;

      // Remove all entries in the free list from this block
      if (prev_free != 0)
        nextof(prev_free) = free;
      else
        this->first = free;

      // And release memory
      UserAllocator::free(ptr.begin());
      ret = true;
    }

    // Increment ptr
    ptr = next;
  }

  return ret;
}

template <typename UserAllocator>
bool pool<UserAllocator>::purge_memory()
{
  details::PODptr<size_type> iter = list;

  if (!iter.valid())
    return false;

  do
  {
    // hold "next" pointer
    const details::PODptr<size_type> next = iter.next();

    // delete the storage
    UserAllocator::free(iter.begin());

    // increment iter
    iter = next;
  } while (iter.valid());

  list.invalidate();
  this->first = 0;

  return true;
}

template <typename UserAllocator>
void * pool<UserAllocator>::malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
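  // POD_size is the chunk area (next_size * partition_size) plus the trailing
  //  next-pointer/next-size bookkeeping laid out by details::PODptr above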
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);
  next_size <<= 1;

  //  initialize it,
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  node.next(list);
  list = node;

  //  and return a chunk from it.
  return store().malloc();
}

template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);
  next_size <<= 1;

  //  initialize it,
  //  (we can use "add_block" here because we know that
  //  the free list is empty, so we don't have to use
  //  the slower ordered version)
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  //   handle border case
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return a chunk from it.
  return store().malloc();
}

template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc(const size_type n)
{
  const size_type partition_size = alloc_size();
  const size_type total_req_size = n * requested_size;
  const size_type num_chunks = total_req_size / partition_size +
      ((total_req_size % partition_size) ? true : false);
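  // (ceiling division: the n * requested_size bytes are rounded up to a whole
  //  number of partition_size chunks)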

  void * ret = store().malloc_n(num_chunks, partition_size);

  if (ret != 0)
    return ret;

  // Not enough memory in our storages; make a new storage,
  BOOST_USING_STD_MAX();
  next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);

  // Split up block so we can use what wasn't requested
  //  (we can use "add_block" here because we know that
  //  the free list is empty, so we don't have to use
  //  the slower ordered version)
  if (next_size > num_chunks)
    store().add_block(node.begin() + num_chunks * partition_size,
        node.element_size() - num_chunks * partition_size, partition_size);

  next_size <<= 1;

  //  insert it into the list,
  //   handle border case
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return it.
  return node.begin();
}

template <typename UserAllocator>
details::PODptr<typename pool<UserAllocator>::size_type>
pool<UserAllocator>::find_POD(void * const chunk) const
{
  // We have to find which storage this chunk is from.
  details::PODptr<size_type> iter = list;
  while (iter.valid())
  {
    if (is_from(chunk, iter.begin(), iter.element_size()))
      return iter;
    iter = iter.next();
  }

  return iter;
}

} // namespace boost

#endif