annotate DEPENDENCIES/generic/include/boost/pool/pool.hpp @ 133:4acb5d8d80b6 tip

author Chris Cannam
date Tue, 30 Jul 2019 12:25:44 +0100
rev   line source
Chris@16 1 // Copyright (C) 2000, 2001 Stephen Cleary
Chris@16 2 //
Chris@16 3 // Distributed under the Boost Software License, Version 1.0. (See
Chris@16 4 // accompanying file LICENSE_1_0.txt or copy at
Chris@16 5 // http://www.boost.org/LICENSE_1_0.txt)
Chris@16 6 //
Chris@16 7 // See http://www.boost.org for updates, documentation, and revision history.
Chris@16 8
Chris@16 9 #ifndef BOOST_POOL_HPP
Chris@16 10 #define BOOST_POOL_HPP
Chris@16 11
Chris@16 12 #include <boost/config.hpp> // for workarounds
Chris@16 13
Chris@16 14 // std::less, std::less_equal, std::greater
Chris@16 15 #include <functional>
Chris@16 16 // new[], delete[], std::nothrow
Chris@16 17 #include <new>
Chris@16 18 // std::size_t, std::ptrdiff_t
Chris@16 19 #include <cstddef>
Chris@16 20 // std::malloc, std::free
Chris@16 21 #include <cstdlib>
Chris@16 22 // std::invalid_argument
Chris@16 23 #include <exception>
Chris@16 24 // std::max
Chris@16 25 #include <algorithm>
Chris@16 26
Chris@16 27 #include <boost/pool/poolfwd.hpp>
Chris@16 28
Chris@101 29 // boost::integer::static_lcm
Chris@101 30 #include <boost/integer/common_factor_ct.hpp>
Chris@16 31 // boost::simple_segregated_storage
Chris@16 32 #include <boost/pool/simple_segregated_storage.hpp>
Chris@16 33 // boost::alignment_of
Chris@16 34 #include <boost/type_traits/alignment_of.hpp>
Chris@16 35 // BOOST_ASSERT
Chris@16 36 #include <boost/assert.hpp>
Chris@16 37
Chris@16 38 #ifdef BOOST_POOL_INSTRUMENT
Chris@16 39 #include <iostream>
Chris@16 40 #include <iomanip>
Chris@16 41 #endif
Chris@16 42 #ifdef BOOST_POOL_VALGRIND
Chris@16 43 #include <set>
Chris@16 44 #include <valgrind/memcheck.h>
Chris@16 45 #endif
Chris@16 46
Chris@16 47 #ifdef BOOST_NO_STDC_NAMESPACE
Chris@16 48 namespace std { using ::malloc; using ::free; }
Chris@16 49 #endif
Chris@16 50
Chris@16 51 // There are a few places in this file where the expression "this->m" is used.
Chris@16 52 // This expression is used to force instantiation-time name lookup, which I am
Chris@16 53 // informed is required for strict Standard compliance. It's only necessary
Chris@16 54 // if "m" is a member of a base class that is dependent on a template
Chris@16 55 // parameter.
Chris@16 56 // Thanks to Jens Maurer for pointing this out!
Chris@16 57
Chris@16 58 /*!
Chris@16 59 \file
Chris@16 60 \brief Provides class \ref pool: a fast memory allocator that guarantees proper alignment of all allocated chunks,
Chris@16 61 and which extends and generalizes the framework provided by the simple segregated storage solution.
Chris@16 62 Also provides two UserAllocator classes which can be used in conjunction with \ref pool.
Chris@16 63 */
Chris@16 64
Chris@16 65 /*!
Chris@16 66 \mainpage Boost.Pool Memory Allocation Scheme
Chris@16 67
Chris@16 68 \section intro_sec Introduction
Chris@16 69
Chris@16 70 Pool allocation is a memory allocation scheme that is very fast, but limited in its usage.
Chris@16 71
Chris@16 72 This Doxygen-style documentation is complementary to the
Chris@16 73 full Quickbook-generated html and pdf documentation at www.boost.org.
Chris@16 74
Chris@16 75 This page is generated from the file pool.hpp.
Chris@16 76
Chris@16 77 */
Chris@16 78
Chris@16 79 #ifdef BOOST_MSVC
Chris@16 80 #pragma warning(push)
Chris@16 81 #pragma warning(disable:4127) // Conditional expression is constant
Chris@16 82 #endif
Chris@16 83
Chris@16 84 namespace boost
Chris@16 85 {
Chris@16 86
Chris@16 87 //! \brief Allocator used as the default template parameter for
Chris@16 88 //! a <a href="boost_pool/pool/pooling.html#boost_pool.pool.pooling.user_allocator">UserAllocator</a>
Chris@16 89 //! template parameter. Uses new and delete.
Chris@16 90 struct default_user_allocator_new_delete
Chris@16 91 {
Chris@16 92 typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
Chris@16 93 typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.
Chris@16 94
Chris@16 95 static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
Chris@16 96 { //! Attempts to allocate the requested number of bytes from the system. Returns 0 if out of memory.
Chris@16 97 return new (std::nothrow) char[bytes];
Chris@16 98 }
Chris@16 99 static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
Chris@16 100 { //! Attempts to de-allocate block.
Chris@16 101 //! \pre Block must have been previously returned from a call to UserAllocator::malloc.
Chris@16 102 delete [] block;
Chris@16 103 }
Chris@16 104 };
Chris@16 105
Chris@16 106 //! \brief <a href="boost_pool/pool/pooling.html#boost_pool.pool.pooling.user_allocator">UserAllocator</a>
Chris@16 107 //! used as template parameter for \ref pool and \ref object_pool.
Chris@16 108 //! Uses malloc and free internally.
Chris@16 109 struct default_user_allocator_malloc_free
Chris@16 110 {
Chris@16 111 typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
Chris@16 112 typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.
Chris@16 113
Chris@16 114 static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
Chris@16 115 { return static_cast<char *>((std::malloc)(bytes)); }
Chris@16 116 static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
Chris@16 117 { (std::free)(block); }
Chris@16 118 };
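// Illustrative sketch (not part of Boost): a user-defined allocator satisfying
// the same UserAllocator interface as the two defaults above, i.e. nested
// size_type / difference_type typedefs plus static malloc and free functions.
// The name example_user_allocator is hypothetical.
//
//    struct example_user_allocator
//    {
//       typedef std::size_t    size_type;
//       typedef std::ptrdiff_t difference_type;
//
//       static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
//       {  // return 0 on failure, as pool expects
//          return new (std::nothrow) char[bytes];
//       }
//       static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
//       {
//          delete [] block;
//       }
//    };
//
// A pool using it would then be declared as boost::pool<example_user_allocator>.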
Chris@16 119
Chris@16 120 namespace details
Chris@16 121 { //! Implementation only.
Chris@16 122
Chris@16 123 template <typename SizeType>
Chris@16 124 class PODptr
Chris@16 125 { //! PODptr is a class that pretends to be a "pointer" to different class types
Chris@16 126 //! that don't really exist. It provides member functions to access the "data"
Chris@16 127 //! of the "object" it points to. Since these "class" types are of variable
Chris@16 128 //! size and contain some information at the *end* of their memory
Chris@16 129 //! (for alignment reasons),
Chris@16 130 //! PODptr must contain the size of this "class" as well as the pointer to this "object".
Chris@16 131
Chris@16 132 /*! \details A PODptr holds the location and size of a memory block allocated from the system.
Chris@16 133 Each memory block is split logically into three sections:
Chris@16 134
Chris@16 135 <b>Chunk area</b>. This section may vary in size. PODptr does not care what the size of the chunks is,
Chris@16 136 but it does care about (and keeps track of) the total size of the chunk area.
Chris@16 137
Chris@16 138 <b>Next pointer</b>. This section is always the same size for a given SizeType. It holds a pointer
Chris@16 139 to the location of the next memory block in the memory block list, or 0 if there is no such block.
Chris@16 140
Chris@16 141 <b>Next size</b>. This section is always the same size for a given SizeType. It holds the size of the
Chris@16 142 next memory block in the memory block list.
Chris@16 143
Chris@16 144 The PODptr class just provides cleaner ways of dealing with raw memory blocks.
Chris@16 145
Chris@16 146 A PODptr object is either valid or invalid. An invalid PODptr is analogous to a null pointer.
Chris@16 147 The default constructor for PODptr will result in an invalid object.
Chris@16 148 Calling the member function invalidate will result in that object becoming invalid.
Chris@16 149 The member function valid can be used to test for validity.
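Schematically (an illustrative sketch; the "next pointer" section occupies
static_lcm<sizeof(size_type), sizeof(void *)>::value bytes):

\verbatim
begin()                               end()
  |<-------- element_size() -------->|<- next pointer ->|<- next size ->|
  [            chunk area            ][     char *      ][  size_type   ]
  |<------------------------- total_size() ---------------------------->|
\endverbatim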
Chris@16 150 */
Chris@16 151 public:
Chris@16 152 typedef SizeType size_type;
Chris@16 153
Chris@16 154 private:
Chris@16 155 char * ptr;
Chris@16 156 size_type sz;
Chris@16 157
Chris@16 158 char * ptr_next_size() const
Chris@16 159 {
Chris@16 160 return (ptr + sz - sizeof(size_type));
Chris@16 161 }
Chris@16 162 char * ptr_next_ptr() const
Chris@16 163 {
Chris@16 164 return (ptr_next_size() -
Chris@101 165 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value);
Chris@16 166 }
Chris@16 167
Chris@16 168 public:
Chris@16 169 PODptr(char * const nptr, const size_type nsize)
Chris@16 170 :ptr(nptr), sz(nsize)
Chris@16 171 {
Chris@16 172 //! A PODptr may be created to point to a memory block by passing
Chris@16 173 //! the address and size of that memory block into the constructor.
Chris@16 174 //! A PODptr constructed in this way is valid.
Chris@16 175 }
Chris@16 176 PODptr()
Chris@16 177 : ptr(0), sz(0)
Chris@16 178 { //! The default constructor for PODptr results in an invalid object.
Chris@16 179 }
Chris@16 180
Chris@16 181 bool valid() const
Chris@16 182 { //! A PODptr object is either valid or invalid.
Chris@16 183 //! An invalid PODptr is analogous to a null pointer.
Chris@16 184 //! \returns true if PODptr is valid, false if invalid.
Chris@16 185 return (begin() != 0);
Chris@16 186 }
Chris@16 187 void invalidate()
Chris@16 188 { //! Make object invalid.
Chris@16 189 begin() = 0;
Chris@16 190 }
Chris@16 191 char * & begin()
Chris@16 192 { //! Each PODptr keeps the address and size of its memory block.
Chris@16 193 //! \returns The address of its memory block.
Chris@16 194 return ptr;
Chris@16 195 }
Chris@16 196 char * begin() const
Chris@16 197 { //! Each PODptr keeps the address and size of its memory block.
Chris@16 198 //! \return The address of its memory block.
Chris@16 199 return ptr;
Chris@16 200 }
Chris@16 201 char * end() const
Chris@16 202 { //! \returns begin() plus element_size (a 'past the end' value).
Chris@16 203 return ptr_next_ptr();
Chris@16 204 }
Chris@16 205 size_type total_size() const
Chris@16 206 { //! Each PODptr keeps the address and size of its memory block.
Chris@16 207 //! The address may be read or written by the member functions begin.
Chris@16 208 //! The size of the memory block may only be read.
Chris@16 209 //! \returns size of the memory block.
Chris@16 210 return sz;
Chris@16 211 }
Chris@16 212 size_type element_size() const
Chris@16 213 { //! \returns size of the chunk area (the part of the block available for chunks).
Chris@16 214 return static_cast<size_type>(sz - sizeof(size_type) -
Chris@101 215 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value);
Chris@16 216 }
Chris@16 217
Chris@16 218 size_type & next_size() const
Chris@16 219 { //! \returns the size of the next memory block in the list.
Chris@16 221 return *(static_cast<size_type *>(static_cast<void*>((ptr_next_size()))));
Chris@16 222 }
Chris@16 223 char * & next_ptr() const
Chris@16 224 { //! \returns a reference to the pointer to the next memory block.
Chris@16 225 return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr())));
Chris@16 226 }
Chris@16 227
Chris@16 228 PODptr next() const
Chris@16 229 { //! \returns next PODptr.
Chris@16 230 return PODptr<size_type>(next_ptr(), next_size());
Chris@16 231 }
Chris@16 232 void next(const PODptr & arg) const
Chris@16 233 { //! Sets next PODptr.
Chris@16 234 next_ptr() = arg.begin();
Chris@16 235 next_size() = arg.total_size();
Chris@16 236 }
Chris@16 237 }; // class PODptr
Chris@16 238 } // namespace details
Chris@16 239
Chris@16 240 #ifndef BOOST_POOL_VALGRIND
Chris@16 241 /*!
Chris@16 242 \brief A fast memory allocator that guarantees proper alignment of all allocated chunks.
Chris@16 243
Chris@16 244 \details Whenever an object of type pool needs memory from the system,
Chris@16 245 it will request it from its UserAllocator template parameter.
Chris@16 246 The amount requested is determined using a doubling algorithm;
Chris@16 247 that is, each time more system memory is allocated,
Chris@16 248 the amount of system memory requested is doubled.
Chris@16 249
Chris@16 250 Users may control the doubling algorithm by using the following extensions:
Chris@16 251
Chris@16 252 Users may pass an additional constructor parameter to pool.
Chris@16 253 This parameter is of type size_type,
Chris@16 254 and is the number of chunks to request from the system
Chris@16 255 the first time that object needs to allocate system memory.
Chris@16 256 The default is 32. This parameter may not be 0.
Chris@16 257
Chris@16 258 Users may also pass an optional third parameter to pool's
Chris@16 259 constructor. This parameter is of type size_type,
Chris@16 260 and sets a maximum size for allocated chunks. When this
Chris@16 261 parameter takes the default value of 0, then there is no upper
Chris@16 262 limit on chunk size.
Chris@16 263
Chris@16 264 Finally, if the doubling algorithm results in no memory
Chris@16 265 being allocated, the pool will backtrack just once, halving
Chris@16 266 the chunk size and trying again.
Chris@16 267
Chris@16 268 <b>UserAllocator type</b> - the method that the Pool will use to allocate memory from the system.
Chris@16 269
Chris@16 270 There are essentially two ways to use class pool: the client can call \ref malloc() and \ref free() to allocate
Chris@16 271 and free single chunks of memory; this is the most efficient way to use a pool, but it does not allow for
Chris@16 272 the efficient allocation of arrays of chunks. Alternatively, the client may call \ref ordered_malloc() and \ref
Chris@16 273 ordered_free(), in which case the free list is maintained in an ordered state, and efficient allocation of arrays
Chris@16 274 of chunks is possible. However, this latter option can suffer from poor performance when large numbers of
Chris@16 275 allocations are performed.
Chris@16 276
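For example (an illustrative sketch, not taken from the Boost documentation;
the variable names are arbitrary):

\code
boost::pool<> p(sizeof(int));                 // chunks large enough to hold an int
void * const chunk = (p.malloc)();            // one chunk; 0 if out of memory
if (chunk)
   (p.free)(chunk);                           // return the chunk to the pool
void * const section = p.ordered_malloc(10);  // 10 contiguous chunks
if (section)
   p.ordered_free(section, 10);
// Any blocks still owned by p are released by its destructor via purge_memory().
\endcode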
Chris@16 277 */
Chris@16 278 template <typename UserAllocator>
Chris@16 279 class pool: protected simple_segregated_storage < typename UserAllocator::size_type >
Chris@16 280 {
Chris@16 281 public:
Chris@16 282 typedef UserAllocator user_allocator; //!< User allocator.
Chris@16 283 typedef typename UserAllocator::size_type size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
Chris@16 284 typedef typename UserAllocator::difference_type difference_type; //!< A signed integral type that can represent the difference of any two pointers.
Chris@16 285
Chris@16 286 private:
Chris@16 287 BOOST_STATIC_CONSTANT(size_type, min_alloc_size =
Chris@101 288 (::boost::integer::static_lcm<sizeof(void *), sizeof(size_type)>::value) );
Chris@16 289 BOOST_STATIC_CONSTANT(size_type, min_align =
Chris@101 290 (::boost::integer::static_lcm< ::boost::alignment_of<void *>::value, ::boost::alignment_of<size_type>::value>::value) );
Chris@16 291
Chris@16 292 //! \returns 0 if out-of-memory.
Chris@16 293 //! Called if malloc/ordered_malloc needs to resize the free list.
Chris@16 294 void * malloc_need_resize(); //!< Called if malloc needs to resize the free list.
Chris@16 295 void * ordered_malloc_need_resize(); //!< Called if ordered_malloc needs to resize the free list.
Chris@16 296
Chris@16 297 protected:
Chris@16 298 details::PODptr<size_type> list; //!< List structure holding ordered blocks.
Chris@16 299
Chris@16 300 simple_segregated_storage<size_type> & store()
Chris@16 301 { //! \returns a reference to the store.
Chris@16 302 return *this;
Chris@16 303 }
Chris@16 304 const simple_segregated_storage<size_type> & store() const
Chris@16 305 { //! \returns a const reference to the store.
Chris@16 306 return *this;
Chris@16 307 }
Chris@16 308 const size_type requested_size;
Chris@16 309 size_type next_size;
Chris@16 310 size_type start_size;
Chris@16 311 size_type max_size;
Chris@16 312
Chris@16 313 //! finds which POD in the list 'chunk' was allocated from.
Chris@16 314 details::PODptr<size_type> find_POD(void * const chunk) const;
Chris@16 315
Chris@16 316 // is_from() tests a chunk to determine if it belongs in a block.
Chris@16 317 static bool is_from(void * const chunk, char * const i,
Chris@16 318 const size_type sizeof_i)
Chris@16 319 { //! \param chunk chunk to check if is from this pool.
Chris@16 320 //! \param i memory chunk at i with element sizeof_i.
Chris@16 321 //! \param sizeof_i element size (size of the chunk area of that block, not the total size of that block).
Chris@16 322 //! \returns true if chunk was allocated from the given block or may be returned
Chris@16 323 //! as the result of a future allocation from that block.
Chris@16 324 //!
Chris@16 325 //! Returns false if chunk was allocated from some other pool,
Chris@16 326 //! or may be returned as the result of a future allocation from some other pool.
Chris@16 327 //! Otherwise, the return value is meaningless.
Chris@16 328 //!
Chris@16 329 //! Note that this function may not be used to reliably test random pointer values.
Chris@16 330
Chris@16 331 // We use std::less_equal and std::less to test 'chunk'
Chris@16 332 // against the array bounds because standard operators
Chris@16 333 // may return unspecified results.
Chris@16 334 // This is to ensure portability. The operators < <= > >= are only
Chris@16 335 // defined for pointers to objects that are 1) in the same array, or
Chris@16 336 // 2) subobjects of the same object [5.9/2].
Chris@16 337 // The functor objects guarantee a total order for any pointer [20.3.3/8]
Chris@16 338 std::less_equal<void *> lt_eq;
Chris@16 339 std::less<void *> lt;
Chris@16 340 return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
Chris@16 341 }
Chris@16 342
Chris@16 343 size_type alloc_size() const
Chris@16 344 { //! Calculated size of the memory chunks that will be allocated by this Pool.
Chris@16 345 //! \returns allocated size.
Chris@16 346 // For alignment reasons, this used to be defined to be lcm(requested_size, sizeof(void *), sizeof(size_type)),
Chris@16 347 // but is now more parsimonious: just rounding up to the minimum required alignment of our housekeeping data
Chris@16 348 // when required. This works provided all alignments are powers of two.
Chris@16 349 size_type s = (std::max)(requested_size, min_alloc_size);
Chris@16 350 size_type rem = s % min_align;
Chris@16 351 if(rem)
Chris@16 352 s += min_align - rem;
Chris@16 353 BOOST_ASSERT(s >= min_alloc_size);
Chris@16 354 BOOST_ASSERT(s % min_align == 0);
Chris@16 355 return s;
Chris@16 356 }
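// Worked example (illustrative, assuming a typical 64-bit platform where
// sizeof(void *) == sizeof(size_type) == 8 and both have alignment 8):
// min_alloc_size == min_align == 8, so a requested_size of 10 gives
// s = max(10, 8) = 10, rem = 10 % 8 = 2, and alloc_size() returns 10 + 6 = 16.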
Chris@16 357
Chris@16 358 static void * & nextof(void * const ptr)
Chris@16 359 { //! \returns a reference to the pointer stored at ptr (ptr dereferenced as a void **).
Chris@16 360 //! (Provided and used for the sake of code readability :)
Chris@16 361 return *(static_cast<void **>(ptr));
Chris@16 362 }
Chris@16 363
Chris@16 364 public:
Chris@16 365 // pre: npartition_size != 0 && nnext_size != 0
Chris@16 366 explicit pool(const size_type nrequested_size,
Chris@16 367 const size_type nnext_size = 32,
Chris@16 368 const size_type nmax_size = 0)
Chris@16 369 :
Chris@16 370 list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size),max_size(nmax_size)
Chris@16 371 { //! Constructs a new empty Pool that can be used to allocate chunks of size RequestedSize.
Chris@16 372 //! \param nrequested_size Requested chunk size
Chris@16 373 //! \param nnext_size the number of chunks to request from the system
Chris@16 374 //! the first time that object needs to allocate system memory.
Chris@16 375 //! The default is 32. This parameter may not be 0.
Chris@16 377 //! \param nmax_size is the maximum number of chunks to allocate in one block.
Chris@16 378 }
Chris@16 379
Chris@16 380 ~pool()
Chris@16 381 { //! Destructs the Pool, freeing its list of memory blocks.
Chris@16 382 purge_memory();
Chris@16 383 }
Chris@16 384
Chris@16 385 // Releases memory blocks that don't have chunks allocated
Chris@16 386 // pre: lists are ordered
Chris@16 387 // Returns true if memory was actually deallocated
Chris@16 388 bool release_memory();
Chris@16 389
Chris@16 390 // Releases *all* memory blocks, even if chunks are still allocated
Chris@16 391 // Returns true if memory was actually deallocated
Chris@16 392 bool purge_memory();
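// Illustrative note (not from the Boost documentation): release_memory()
// relies on the free list being ordered, so it is only useful when the pool
// has been used through the ordered_* functions, e.g.
//    boost::pool<> p(64);
//    void * a = p.ordered_malloc();
//    p.ordered_free(a);
//    p.release_memory();   // the now-unused block may be returned to the system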
Chris@16 393
Chris@16 394 size_type get_next_size() const
Chris@16 395 { //! Number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be 0.
Chris@16 396 //! \returns next_size.
Chris@16 397 return next_size;
Chris@16 398 }
Chris@16 399 void set_next_size(const size_type nnext_size)
Chris@16 400 { //! Set number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be set to 0.
Chris@16 401 //! \param nnext_size the number of chunks to request from the system next time a memory block is needed.
Chris@16 402 next_size = start_size = nnext_size;
Chris@16 403 }
Chris@16 404 size_type get_max_size() const
Chris@16 405 { //! \returns max_size.
Chris@16 406 return max_size;
Chris@16 407 }
Chris@16 408 void set_max_size(const size_type nmax_size)
Chris@16 409 { //! Set max_size.
Chris@16 410 max_size = nmax_size;
Chris@16 411 }
Chris@16 412 size_type get_requested_size() const
Chris@16 413 { //! \returns the requested size passed into the constructor.
Chris@16 414 //! (This value will not change during the lifetime of a Pool object).
Chris@16 415 return requested_size;
Chris@16 416 }
Chris@16 417
Chris@16 418 // Both malloc and ordered_malloc do a quick inlined check first for any
Chris@16 419 // free chunks. Only if we need to get another memory block do we call
Chris@16 420 // the non-inlined *_need_resize() functions.
Chris@16 421 // Returns 0 if out-of-memory
Chris@16 422 void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
Chris@16 423 { //! Allocates a chunk of memory. Searches in the list of memory blocks
Chris@16 424 //! for a block that has a free chunk, and returns that free chunk if found.
Chris@16 425 //! Otherwise, creates a new memory block, adds its free list to pool's free list,
Chris@16 426 //! \returns a free chunk from that block.
Chris@16 427 //! If a new memory block cannot be allocated, returns 0. Amortized O(1).
Chris@16 428 // Look for a non-empty storage
Chris@16 429 if (!store().empty())
Chris@16 430 return (store().malloc)();
Chris@16 431 return malloc_need_resize();
Chris@16 432 }
Chris@16 433
Chris@16 434 void * ordered_malloc()
Chris@16 435 { //! Same as malloc, only merges the free lists, to preserve order. Amortized O(1).
Chris@16 436 //! \returns a free chunk from that block.
Chris@16 437 //! If a new memory block cannot be allocated, returns 0. Amortized O(1).
Chris@16 438 // Look for a non-empty storage
Chris@16 439 if (!store().empty())
Chris@16 440 return (store().malloc)();
Chris@16 441 return ordered_malloc_need_resize();
Chris@16 442 }
Chris@16 443
Chris@16 444 // Returns 0 if out-of-memory
Chris@16 445 // Allocate a contiguous section of n chunks
Chris@16 446 void * ordered_malloc(size_type n);
Chris@16 447 //! Same as malloc, only allocates enough contiguous chunks to cover n * requested_size bytes. Amortized O(n).
Chris@16 448 //! \returns the first chunk of a contiguous section of chunks spanning n * requested_size bytes.
Chris@16 449 //! If a new memory block cannot be allocated, returns 0.
Chris@16 450
Chris@16 451 // pre: 'chunk' must have been previously
Chris@16 452 // returned by *this.malloc().
Chris@16 453 void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk)
Chris@16 454 { //! Deallocates a chunk of memory. Note that chunk may not be 0. O(1).
Chris@16 455 //!
Chris@16 456 //! Chunk must have been previously returned by t.malloc() or t.ordered_malloc().
Chris@16 461 (store().free)(chunk);
Chris@16 462 }
Chris@16 463
Chris@16 464 // pre: 'chunk' must have been previously
Chris@16 465 // returned by *this.malloc().
Chris@16 466 void ordered_free(void * const chunk)
Chris@16 467 { //! Same as above, but is order-preserving.
Chris@16 468 //!
Chris@16 469 //! Note that chunk may not be 0. O(N) with respect to the size of the free list.
Chris@16 470 //! chunk must have been previously returned by t.malloc() or t.ordered_malloc().
Chris@16 471 store().ordered_free(chunk);
Chris@16 472 }
Chris@16 473
Chris@16 474 // pre: 'chunk' must have been previously
Chris@16 475 // returned by *this.malloc(n).
Chris@16 476 void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunks, const size_type n)
Chris@16 477 { //! Assumes that chunks actually refers to a block of chunks
Chris@16 478 //! spanning n * partition_sz bytes; deallocates each chunk in that block.
Chris@16 479 //!
Chris@16 480 //! chunks must have been previously returned by t.ordered_malloc(n).
Chris@16 481 //! Note that chunks may not be 0. O(n).
Chris@16 483 const size_type partition_size = alloc_size();
Chris@16 484 const size_type total_req_size = n * requested_size;
Chris@16 485 const size_type num_chunks = total_req_size / partition_size +
Chris@16 486 ((total_req_size % partition_size) ? true : false);
Chris@16 487
Chris@16 488 store().free_n(chunks, num_chunks, partition_size);
Chris@16 489 }
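// Worked example (illustrative numbers): with requested_size == 10 and
// alloc_size() == 16, freeing n == 3 covers 3 * 10 == 30 bytes, so
// num_chunks == 30 / 16 + 1 == 2 partitions of 16 bytes are released.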
Chris@16 490
Chris@16 491 // pre: 'chunk' must have been previously
Chris@16 492 // returned by *this.malloc(n).
Chris@16 493 void ordered_free(void * const chunks, const size_type n)
Chris@16 494 { //! Assumes that chunk actually refers to a block of chunks spanning n * partition_sz bytes;
Chris@16 495 //! deallocates each chunk in that block.
Chris@16 496 //!
Chris@16 497 //! Note that chunk may not be 0. Order-preserving. O(N + n) where N is the size of the free list.
Chris@16 498 //! chunks must have been previously returned by t.ordered_malloc(n).
Chris@16 499
Chris@16 500 const size_type partition_size = alloc_size();
Chris@16 501 const size_type total_req_size = n * requested_size;
Chris@16 502 const size_type num_chunks = total_req_size / partition_size +
Chris@16 503 ((total_req_size % partition_size) ? true : false);
Chris@16 504
Chris@16 505 store().ordered_free_n(chunks, num_chunks, partition_size);
Chris@16 506 }
Chris@16 507
Chris@16 508 // is_from() tests a chunk to determine if it was allocated from *this
Chris@16 509 bool is_from(void * const chunk) const
Chris@16 510 { //! \returns true if chunk was allocated from this pool or
Chris@16 511 //! may be returned as the result of a future allocation from this pool.
Chris@16 512 //! Returns false if chunk was allocated from some other pool or
Chris@16 513 //! may be returned as the result of a future allocation from some other pool.
Chris@16 514 //! Otherwise, the return value is meaningless.
Chris@16 515 //! Note that this function may not be used to reliably test random pointer values.
Chris@16 516 return (find_POD(chunk).valid());
Chris@16 517 }
Chris@16 518 };
Chris@16 519
Chris@16 520 #ifndef BOOST_NO_INCLASS_MEMBER_INITIALIZATION
Chris@16 521 template <typename UserAllocator>
Chris@16 522 typename pool<UserAllocator>::size_type const pool<UserAllocator>::min_alloc_size;
Chris@16 523 template <typename UserAllocator>
Chris@16 524 typename pool<UserAllocator>::size_type const pool<UserAllocator>::min_align;
Chris@16 525 #endif
Chris@16 526
Chris@16 527 template <typename UserAllocator>
Chris@16 528 bool pool<UserAllocator>::release_memory()
Chris@16 529 { //! pool must be ordered. Frees every memory block that doesn't have any allocated chunks.
Chris@16 530 //! \returns true if at least one memory block was freed.
Chris@16 531
Chris@16 532 // ret is the return value: it will be set to true when we actually call
Chris@16 533 // UserAllocator::free(..)
Chris@16 534 bool ret = false;
Chris@16 535
Chris@16 536 // This is a current & previous iterator pair over the memory block list
Chris@16 537 details::PODptr<size_type> ptr = list;
Chris@16 538 details::PODptr<size_type> prev;
Chris@16 539
Chris@16 540 // This is a current & previous iterator pair over the free memory chunk list
Chris@16 541 // Note that "prev_free" in this case does NOT point to the previous memory
Chris@16 542 // chunk in the free list, but rather the last free memory chunk before the
Chris@16 543 // current block.
Chris@16 544 void * free_p = this->first;
Chris@16 545 void * prev_free_p = 0;
Chris@16 546
Chris@16 547 const size_type partition_size = alloc_size();
Chris@16 548
Chris@16 549 // Search through all the allocated memory blocks
Chris@16 550 while (ptr.valid())
Chris@16 551 {
Chris@16 552 // At this point:
Chris@16 553 // ptr points to a valid memory block
Chris@16 554 // free_p points to either:
Chris@16 555 // 0 if there are no more free chunks
Chris@16 556 // the first free chunk in this or some next memory block
Chris@16 557 // prev_free_p points to either:
Chris@16 558 // the last free chunk in some previous memory block
Chris@16 559 // 0 if there is no such free chunk
Chris@16 560 // prev is either:
Chris@16 561 // the PODptr whose next() is ptr
Chris@16 562 // !valid() if there is no such PODptr
Chris@16 563
Chris@16 564 // If there are no more free memory chunks, then every remaining
Chris@16 565 // block is allocated out to its fullest capacity, and we can't
Chris@16 566 // release any more memory
Chris@16 567 if (free_p == 0)
Chris@16 568 break;
Chris@16 569
Chris@16 570 // We have to check all the chunks. If they are *all* free (i.e., present
Chris@16 571 // in the free list), then we can free the block.
Chris@16 572 bool all_chunks_free = true;
Chris@16 573
Chris@16 574 // Iterate 'i' through all chunks in the memory block
Chris@16 575 // if free starts in the memory block, be careful to keep it there
Chris@16 576 void * saved_free = free_p;
Chris@16 577 for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
Chris@16 578 {
Chris@16 579 // If this chunk is not free
Chris@16 580 if (i != free_p)
Chris@16 581 {
Chris@16 582 // We won't be able to free this block
Chris@16 583 all_chunks_free = false;
Chris@16 584
Chris@16 585 // free_p might have travelled outside ptr
Chris@16 586 free_p = saved_free;
Chris@16 587 // Abort searching the chunks; we won't be able to free this
Chris@16 588 // block because a chunk is not free.
Chris@16 589 break;
Chris@16 590 }
Chris@16 591
Chris@16 592 // We do not increment prev_free_p because we are in the same block
Chris@16 593 free_p = nextof(free_p);
Chris@16 594 }
Chris@16 595
Chris@16 596 // post: if the memory block has any chunks, free_p points to one of them
Chris@16 597 // otherwise, our assertions above are still valid
Chris@16 598
Chris@16 599 const details::PODptr<size_type> next = ptr.next();
Chris@16 600
Chris@16 601 if (!all_chunks_free)
Chris@16 602 {
Chris@16 603 if (is_from(free_p, ptr.begin(), ptr.element_size()))
Chris@16 604 {
Chris@16 605 std::less<void *> lt;
Chris@16 606 void * const end = ptr.end();
Chris@16 607 do
Chris@16 608 {
Chris@16 609 prev_free_p = free_p;
Chris@16 610 free_p = nextof(free_p);
Chris@16 611 } while (free_p && lt(free_p, end));
Chris@16 612 }
Chris@16 613 // This invariant is now restored:
Chris@16 614 // free_p points to the first free chunk in some next memory block, or
Chris@16 615 // 0 if there is no such chunk.
Chris@16 616 // prev_free_p points to the last free chunk in this memory block.
Chris@16 617
Chris@16 618 // We are just about to advance ptr. Maintain the invariant:
Chris@16 619 // prev is the PODptr whose next() is ptr, or !valid()
Chris@16 620 // if there is no such PODptr
Chris@16 621 prev = ptr;
Chris@16 622 }
Chris@16 623 else
Chris@16 624 {
Chris@16 625 // All chunks from this block are free
Chris@16 626
Chris@16 627 // Remove block from list
Chris@16 628 if (prev.valid())
Chris@16 629 prev.next(next);
Chris@16 630 else
Chris@16 631 list = next;
Chris@16 632
Chris@16 633 // Remove all entries in the free list from this block
Chris@16 634 if (prev_free_p != 0)
Chris@16 635 nextof(prev_free_p) = free_p;
Chris@16 636 else
Chris@16 637 this->first = free_p;
Chris@16 638
Chris@16 639 // And release memory
Chris@16 640 (UserAllocator::free)(ptr.begin());
Chris@16 641 ret = true;
Chris@16 642 }
Chris@16 643
Chris@16 644 // Increment ptr
Chris@16 645 ptr = next;
Chris@16 646 }
Chris@16 647
Chris@16 648 next_size = start_size;
Chris@16 649 return ret;
Chris@16 650 }
Chris@16 651
Chris@16 652 template <typename UserAllocator>
Chris@16 653 bool pool<UserAllocator>::purge_memory()
Chris@16 654 { //! pool must be ordered.
Chris@16 655 //! Frees every memory block.
Chris@16 656 //!
Chris@16 657 //! This function invalidates any pointers previously returned
Chris@16 658 //! by allocation functions of t.
Chris@16 659 //! \returns true if at least one memory block was freed.
Chris@16 660
Chris@16 661 details::PODptr<size_type> iter = list;
Chris@16 662
Chris@16 663 if (!iter.valid())
Chris@16 664 return false;
Chris@16 665
Chris@16 666 do
Chris@16 667 {
Chris@16 668 // hold "next" pointer
Chris@16 669 const details::PODptr<size_type> next = iter.next();
Chris@16 670
Chris@16 671 // delete the storage
Chris@16 672 (UserAllocator::free)(iter.begin());
Chris@16 673
Chris@16 674 // increment iter
Chris@16 675 iter = next;
Chris@16 676 } while (iter.valid());
Chris@16 677
Chris@16 678 list.invalidate();
Chris@16 679 this->first = 0;
Chris@16 680 next_size = start_size;
Chris@16 681
Chris@16 682 return true;
Chris@16 683 }
Chris@16 684
Chris@16 685 template <typename UserAllocator>
Chris@16 686 void * pool<UserAllocator>::malloc_need_resize()
Chris@16 687 { //! No memory in any of our storages; make a new storage,
Chris@16 688 //! and allocate a chunk from the newly allocated memory block.
Chris@16 689 //! \returns pointer to chunk.
Chris@16 690 size_type partition_size = alloc_size();
Chris@16 691 size_type POD_size = static_cast<size_type>(next_size * partition_size +
Chris@101 692 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
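// For instance (illustrative, assuming sizeof(size_type) == sizeof(void *) == 8):
// with partition_size == 16 and next_size == 32 this requests
// 32 * 16 + 8 + 8 == 528 bytes: 512 bytes of chunks plus the trailing
// next-pointer and next-size fields described by PODptr.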
Chris@16 693 char * ptr = (UserAllocator::malloc)(POD_size);
Chris@16 694 if (ptr == 0)
Chris@16 695 {
Chris@16 696 if(next_size > 4)
Chris@16 697 {
Chris@16 698 next_size >>= 1;
Chris@16 699 partition_size = alloc_size();
Chris@16 700 POD_size = static_cast<size_type>(next_size * partition_size +
Chris@101 701 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
Chris@16 702 ptr = (UserAllocator::malloc)(POD_size);
Chris@16 703 }
Chris@16 704 if(ptr == 0)
Chris@16 705 return 0;
Chris@16 706 }
Chris@16 707 const details::PODptr<size_type> node(ptr, POD_size);
Chris@16 708
Chris@16 709 BOOST_USING_STD_MIN();
Chris@16 710 if(!max_size)
Chris@16 711 next_size <<= 1;
Chris@16 712 else if( next_size*partition_size/requested_size < max_size)
Chris@16 713 next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);
Chris@16 714
Chris@16 715 // initialize it,
Chris@16 716 store().add_block(node.begin(), node.element_size(), partition_size);
Chris@16 717
Chris@16 718 // insert it into the list,
Chris@16 719 node.next(list);
Chris@16 720 list = node;
Chris@16 721
Chris@16 722 // and return a chunk from it.
Chris@16 723 return (store().malloc)();
Chris@16 724 }
Chris@16 725
Chris@16 726 template <typename UserAllocator>
Chris@16 727 void * pool<UserAllocator>::ordered_malloc_need_resize()
Chris@16 728 { //! No memory in any of our storages; make a new storage,
Chris@16 729 //! \returns pointer to new chunk.
Chris@16 730 size_type partition_size = alloc_size();
Chris@16 731 size_type POD_size = static_cast<size_type>(next_size * partition_size +
Chris@101 732 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
Chris@16 733 char * ptr = (UserAllocator::malloc)(POD_size);
Chris@16 734 if (ptr == 0)
Chris@16 735 {
Chris@16 736 if(next_size > 4)
Chris@16 737 {
Chris@16 738 next_size >>= 1;
Chris@16 739 partition_size = alloc_size();
Chris@16 740 POD_size = static_cast<size_type>(next_size * partition_size +
Chris@101 741 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
Chris@16 742 ptr = (UserAllocator::malloc)(POD_size);
Chris@16 743 }
Chris@16 744 if(ptr == 0)
Chris@16 745 return 0;
Chris@16 746 }
Chris@16 747 const details::PODptr<size_type> node(ptr, POD_size);
Chris@16 748
Chris@16 749 BOOST_USING_STD_MIN();
Chris@16 750 if(!max_size)
Chris@16 751 next_size <<= 1;
Chris@16 752 else if( next_size*partition_size/requested_size < max_size)
Chris@16 753 next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);
Chris@16 754
Chris@16 755 // initialize it,
Chris@16 756 // (the free list is empty at this point, so adding the new
Chris@16 757 // block with add_ordered_block keeps the list ordered without
Chris@16 758 // a slower merge against existing free chunks)
Chris@16 759 store().add_ordered_block(node.begin(), node.element_size(), partition_size);
Chris@16 760
Chris@16 761 // insert it into the list,
Chris@16 762 // handle border case
Chris@16 763 if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
Chris@16 764 {
Chris@16 765 node.next(list);
Chris@16 766 list = node;
Chris@16 767 }
Chris@16 768 else
Chris@16 769 {
Chris@16 770 details::PODptr<size_type> prev = list;
Chris@16 771
Chris@16 772 while (true)
Chris@16 773 {
Chris@16 774 // if we're about to hit the end or
Chris@16 775 // if we've found where "node" goes
Chris@16 776 if (prev.next_ptr() == 0
Chris@16 777 || std::greater<void *>()(prev.next_ptr(), node.begin()))
Chris@16 778 break;
Chris@16 779
Chris@16 780 prev = prev.next();
Chris@16 781 }
Chris@16 782
Chris@16 783 node.next(prev.next());
Chris@16 784 prev.next(node);
Chris@16 785 }
Chris@16 786 // and return a chunk from it.
Chris@16 787 return (store().malloc)();
Chris@16 788 }
Chris@16 789
Chris@16 790 template <typename UserAllocator>
Chris@16 791 void * pool<UserAllocator>::ordered_malloc(const size_type n)
Chris@16 792 { //! Allocates a contiguous section of n chunks, allocating new memory if not already available.
Chris@16 793 //! \returns address of the first chunk if allocated ok.
Chris@16 794 //! \returns 0 if not enough memory for n chunks.
Chris@16 795
Chris@16 796 const size_type partition_size = alloc_size();
Chris@16 797 const size_type total_req_size = n * requested_size;
Chris@16 798 const size_type num_chunks = total_req_size / partition_size +
Chris@16 799 ((total_req_size % partition_size) ? true : false);
Chris@16 800
Chris@16 801 void * ret = store().malloc_n(num_chunks, partition_size);
Chris@16 802
Chris@16 803 #ifdef BOOST_POOL_INSTRUMENT
Chris@16 804 std::cout << "Allocating " << n << " chunks from pool of size " << partition_size << std::endl;
Chris@16 805 #endif
Chris@16 806 if ((ret != 0) || (n == 0))
Chris@16 807 return ret;
Chris@16 808
Chris@16 809 #ifdef BOOST_POOL_INSTRUMENT
Chris@16 810 std::cout << "Cache miss, allocating another chunk...\n";
Chris@16 811 #endif
Chris@16 812
Chris@16 813 // Not enough memory in our storages; make a new storage,
Chris@16 814 BOOST_USING_STD_MAX();
Chris@16 815 next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
Chris@16 816 size_type POD_size = static_cast<size_type>(next_size * partition_size +
Chris@101 817 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
Chris@16 818 char * ptr = (UserAllocator::malloc)(POD_size);
Chris@16 819 if (ptr == 0)
Chris@16 820 {
Chris@16 821 if(num_chunks < next_size)
Chris@16 822 {
Chris@16 823 // Try again with just enough memory to do the job, or at least whatever we
Chris@16 824 // allocated last time:
Chris@16 825 next_size >>= 1;
Chris@16 826 next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
Chris@16 827 POD_size = static_cast<size_type>(next_size * partition_size +
Chris@101 828 integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
Chris@16 829 ptr = (UserAllocator::malloc)(POD_size);
Chris@16 830 }
Chris@16 831 if(ptr == 0)
Chris@16 832 return 0;
Chris@16 833 }
Chris@16 834 const details::PODptr<size_type> node(ptr, POD_size);
Chris@16 835
Chris@16 836 // Split up block so we can use what wasn't requested.
Chris@16 837 if (next_size > num_chunks)
Chris@16 838 store().add_ordered_block(node.begin() + num_chunks * partition_size,
Chris@16 839 node.element_size() - num_chunks * partition_size, partition_size);
Chris@16 840
Chris@16 841 BOOST_USING_STD_MIN();
Chris@16 842 if(!max_size)
Chris@16 843 next_size <<= 1;
Chris@16 844 else if( next_size*partition_size/requested_size < max_size)
Chris@16 845 next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size);
Chris@16 846
Chris@16 847 // insert it into the list,
Chris@16 848 // handle border case.
Chris@16 849 if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
Chris@16 850 {
Chris@16 851 node.next(list);
Chris@16 852 list = node;
Chris@16 853 }
Chris@16 854 else
Chris@16 855 {
Chris@16 856 details::PODptr<size_type> prev = list;
Chris@16 857
Chris@16 858 while (true)
Chris@16 859 {
Chris@16 860 // if we're about to hit the end, or if we've found where "node" goes.
Chris@16 861 if (prev.next_ptr() == 0
Chris@16 862 || std::greater<void *>()(prev.next_ptr(), node.begin()))
Chris@16 863 break;
Chris@16 864
Chris@16 865 prev = prev.next();
Chris@16 866 }
Chris@16 867
Chris@16 868 node.next(prev.next());
Chris@16 869 prev.next(node);
Chris@16 870 }
Chris@16 871
Chris@16 872 // and return it.
Chris@16 873 return node.begin();
Chris@16 874 }
Chris@16 875
Chris@16 876 template <typename UserAllocator>
Chris@16 877 details::PODptr<typename pool<UserAllocator>::size_type>
Chris@16 878 pool<UserAllocator>::find_POD(void * const chunk) const
Chris@16 879 { //! Find which PODptr memory block this chunk is from.
Chris@16 880 //! \returns the PODptr that holds this chunk.
Chris@16 881 // Iterate down list to find which storage this chunk is from.
Chris@16 882 details::PODptr<size_type> iter = list;
Chris@16 883 while (iter.valid())
Chris@16 884 {
Chris@16 885 if (is_from(chunk, iter.begin(), iter.element_size()))
Chris@16 886 return iter;
Chris@16 887 iter = iter.next();
Chris@16 888 }
Chris@16 889
Chris@16 890 return iter;
Chris@16 891 }
Chris@16 892
Chris@16 893 #else // BOOST_POOL_VALGRIND
Chris@16 894
Chris@16 895 template<typename UserAllocator>
Chris@16 896 class pool
Chris@16 897 {
Chris@16 898 public:
Chris@16 899 // types
Chris@16 900 typedef UserAllocator user_allocator; // User allocator.
Chris@16 901 typedef typename UserAllocator::size_type size_type; // An unsigned integral type that can represent the size of the largest object to be allocated.
Chris@16 902 typedef typename UserAllocator::difference_type difference_type; // A signed integral type that can represent the difference of any two pointers.
Chris@16 903
Chris@16 904 // construct/copy/destruct
Chris@16 905 explicit pool(const size_type s, const size_type = 32, const size_type m = 0) : chunk_size(s), max_alloc_size(m) {}
Chris@16 906 ~pool()
Chris@16 907 {
Chris@16 908 purge_memory();
Chris@16 909 }
Chris@16 910
Chris@16 911 bool release_memory()
Chris@16 912 {
Chris@16 913 bool ret = free_list.empty() ? false : true;
Chris@16 914 for(std::set<void*>::iterator pos = free_list.begin(); pos != free_list.end(); ++pos)
Chris@16 915 {
Chris@16 916 (user_allocator::free)(static_cast<char*>(*pos));
Chris@16 917 }
Chris@16 918 free_list.clear();
Chris@16 919 return ret;
Chris@16 920 }
Chris@16 921 bool purge_memory()
Chris@16 922 {
Chris@16 923 bool ret = free_list.empty() && used_list.empty() ? false : true;
Chris@16 924 for(std::set<void*>::iterator pos = free_list.begin(); pos != free_list.end(); ++pos)
Chris@16 925 {
Chris@16 926 (user_allocator::free)(static_cast<char*>(*pos));
Chris@16 927 }
Chris@16 928 free_list.clear();
Chris@16 929 for(std::set<void*>::iterator pos = used_list.begin(); pos != used_list.end(); ++pos)
Chris@16 930 {
Chris@16 931 (user_allocator::free)(static_cast<char*>(*pos));
Chris@16 932 }
Chris@16 933 used_list.clear();
Chris@16 934 return ret;
Chris@16 935 }
Chris@16 936 size_type get_next_size() const
Chris@16 937 {
Chris@16 938 return 1;
Chris@16 939 }
Chris@16 940 void set_next_size(const size_type){}
Chris@16 941 size_type get_max_size() const
Chris@16 942 {
Chris@16 943 return max_alloc_size;
Chris@16 944 }
Chris@16 945 void set_max_size(const size_type s)
Chris@16 946 {
Chris@16 947 max_alloc_size = s;
Chris@16 948 }
Chris@16 949 size_type get_requested_size() const
Chris@16 950 {
Chris@16 951 return chunk_size;
Chris@16 952 }
Chris@16 953 void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
Chris@16 954 {
Chris@16 955 void* ret;
Chris@16 956 if(free_list.empty())
Chris@16 957 {
Chris@16 958 ret = (user_allocator::malloc)(chunk_size);
Chris@16 959 VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size);
Chris@16 960 }
Chris@16 961 else
Chris@16 962 {
Chris@16 963 ret = *free_list.begin();
Chris@16 964 free_list.erase(free_list.begin());
Chris@16 965 VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size);
Chris@16 966 }
Chris@16 967 used_list.insert(ret);
Chris@16 968 return ret;
Chris@16 969 }
Chris@16 970 void * ordered_malloc()
Chris@16 971 {
Chris@16 972 return (this->malloc)();
Chris@16 973 }
Chris@16 974 void * ordered_malloc(size_type n)
Chris@16 975 {
Chris@16 976 if(max_alloc_size && (n > max_alloc_size))
Chris@16 977 return 0;
Chris@16 978 void* ret = (user_allocator::malloc)(chunk_size * n);
Chris@16 979 used_list.insert(ret);
Chris@16 980 return ret;
Chris@16 981 }
Chris@16 982 void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk)
Chris@16 983 {
Chris@16 984 BOOST_ASSERT(used_list.count(chunk) == 1);
Chris@16 985 BOOST_ASSERT(free_list.count(chunk) == 0);
Chris@16 986 used_list.erase(chunk);
Chris@16 987 free_list.insert(chunk);
Chris@16 988 VALGRIND_MAKE_MEM_NOACCESS(chunk, chunk_size);
Chris@16 989 }
Chris@16 990 void ordered_free(void *const chunk)
Chris@16 991 {
Chris@16 992 return (this->free)(chunk);
Chris@16 993 }
Chris@16 994 void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk, const size_type)
Chris@16 995 {
Chris@16 996 BOOST_ASSERT(used_list.count(chunk) == 1);
Chris@16 997 BOOST_ASSERT(free_list.count(chunk) == 0);
Chris@16 998 used_list.erase(chunk);
Chris@16 999 (user_allocator::free)(static_cast<char*>(chunk));
Chris@16 1000 }
Chris@16 1001 void ordered_free(void *const chunk, const size_type n)
Chris@16 1002 {
Chris@16 1003 (this->free)(chunk, n);
Chris@16 1004 }
Chris@16 1005 bool is_from(void *const chunk) const
Chris@16 1006 {
Chris@16 1007 return used_list.count(chunk) || free_list.count(chunk);
Chris@16 1008 }
Chris@16 1009
Chris@16 1010 protected:
Chris@16 1011 size_type chunk_size, max_alloc_size;
Chris@16 1012 std::set<void*> free_list, used_list;
Chris@16 1013 };
Chris@16 1014
Chris@16 1015 #endif
Chris@16 1016
Chris@16 1017 } // namespace boost
Chris@16 1018
Chris@16 1019 #ifdef BOOST_MSVC
Chris@16 1020 #pragma warning(pop)
Chris@16 1021 #endif
Chris@16 1022
Chris@16 1023 #endif // #ifdef BOOST_POOL_HPP
Chris@16 1024