annotate DEPENDENCIES/generic/include/boost/interprocess/mem_algo/rbtree_best_fit.hpp @ 125:34e428693f5d vext

Vext -> Repoint
author Chris Cannam
date Thu, 14 Jun 2018 11:15:39 +0100
parents c530137014c0
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP

#ifndef BOOST_CONFIG_HPP
# include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
# pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
// interprocess/detail
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container
#include <boost/container/detail/multiallocation_chain.hpp>
// container/detail
#include <boost/container/detail/placement_new.hpp>
// move/detail
#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
// intrusive
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
// other boost
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
// std
#include <climits>
#include <cstring>

//#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
//to maintain ABI compatibility with the original version.
//The ABI had to be updated to fix compatibility issues when
//sharing shared memory between 32 and 64 bit processes.

//!\file
//!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
//!objects in shared memory. This class is intended as a base class for single segment
//!and multi-segment implementations.
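
//Illustrative sketch: this algorithm is normally consumed through the
//managed memory front-ends (managed_shared_memory, for instance, is a
//basic_managed_shared_memory instantiated with rbtree_best_fit<mutex_family>).
//Assuming the standard front-end headers and a hypothetical segment name,
//a segment backed by this algorithm can be used like this:
//
//  #include <boost/interprocess/managed_shared_memory.hpp>
//  using namespace boost::interprocess;
//
//  managed_shared_memory segment(create_only, "MySegment", 65536);
//  void *p = segment.allocate(128);   //best-fit search in the rbtree
//  segment.deallocate(p);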

namespace boost {
namespace interprocess {

//!This class implements an algorithm that stores the free nodes in a red-black tree
//!to have logarithmic search/insert times.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type         block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type               char_ptr;

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily  mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer  void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer>  multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::container_detail::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:

   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type TreeHook;

   struct SizeHolder
   {
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_prev_size : sizeof(size_type)*CHAR_BIT;
      size_type m_size      : sizeof(size_type)*CHAR_BIT - 2;
      size_type m_prev_allocated : 1;
      size_type m_allocated : 1;
   };
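
   //Worked example (hypothetical 64-bit figures): with an 8-byte size_type,
   //m_size keeps 62 bits, so the largest representable block is
   //(2^62 - 1) Alignment units; the two stolen bits hold the allocation
   //flags of this block and of the previous one, which is what allows
   //boundary-tag coalescing without a separate bitmap. Under common
   //bitfield packing, SizeHolder then occupies two size_type words:
   //
   //  BOOST_STATIC_ASSERT(sizeof(SizeHolder) == 2*sizeof(size_type));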

   //!Block control structure
   struct block_ctrl
      :  public SizeHolder, public TreeHook
   {
      block_ctrl()
      {  this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0;  }

      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size < b.m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size == b.m_size;  }
   };

   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };
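
   //Illustrative sketch: because both overloads are provided, this comparator
   //supports heterogeneous lookups in the intrusive multiset, so priv_allocate
   //can search by raw unit count without materializing a block_ctrl key:
   //
   //  size_block_ctrl_compare comp;
   //  imultiset_iterator it(m_header.m_imultiset.lower_bound(units, comp));
   //  //*it is now the smallest free block with m_size >= units (best fit)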

   //!Shared mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type                    mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type             Imultiset;

   typedef typename Imultiset::iterator                        imultiset_iterator;
   typedef typename Imultiset::const_iterator                  imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null mutex_type
   struct header_t : public mutex_type
   {
      Imultiset   m_imultiset;

      //!The extra size required by the segment
      size_type   m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type   m_allocated;
      //!The size of the memory segment
      size_type   m_size;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes, beginning at the
   //!sizeof(rbtree_best_fit) offset, that the allocator should not use at all.
   rbtree_best_fit(size_type size, size_type extra_hdr_bytes);

   //!Destructor.
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is no more memory
   void* allocate (size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);
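
   //Illustrative sketch (experimental interface, see the warning above):
   //both calls take the segment lock once for the whole batch, e.g. ten
   //64-byte nodes allocated and later returned in bulk:
   //
   //  multiallocation_chain chain;
   //  algo.allocate_many(64, 10, chain);
   //  //...distribute the chain's buffers, collect them back...
   //  algo.deallocate_many(chain);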

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void   deallocate (void *addr);

   //!Returns the size of the memory segment
   size_type get_size()  const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory()  const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory by
   //!extra_size bytes
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true if success
   bool check_sanity();

   template<class T>
   T * allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                           size_type &prefer_in_recvd_out_size, T *&reuse);

   void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
                           size_type &prefer_in_recvd_out_size,
                           void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated, pointed to by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is no more memory.
   //!Alignment must be power of 2
   void* allocate_aligned (size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                        size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);


   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Get a pointer to the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Get a pointer to the "first" block from the end block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Get a pointer to the previous block (previous block must be free)
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Get a pointer to the next block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Checks if this block is allocated (not free)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks a newly created block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr); }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the user
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::container_detail::alignment_of
                  < ::boost::container::container_detail::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_STATIC_ASSERT((Alignment >= ::boost::container::container_detail::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes  = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits  = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes   = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits   = EndCtrlBlockBytes/Alignment;
   static const size_type MinBlockUnits       = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the maximum alignment is power of two
   BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
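
   //Worked example (hypothetical 64-bit figures, Alignment == 16): SizeHolder
   //occupies 16 bytes, so AllocatedCtrlBytes == EndCtrlBlockBytes == 16 and
   //AllocatedCtrlUnits == EndCtrlBlockUnits == 1; with a three-pointer tree
   //hook block_ctrl is 40 bytes, rounded to BlockCtrlBytes == 48, i.e.
   //BlockCtrlUnits == MinBlockUnits == 3. UsableByPreviousChunk == 8 because
   //the last size_type of a block's payload overlaps the next header's
   //m_prev_size field and is still usable by this allocation.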
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   public:
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};
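
//Illustrative sketch: the algorithm object lives at the very beginning of
//the segment it manages. Assuming null_mutex_family from
//<boost/interprocess/sync/mutex_family.hpp> and a process-local buffer,
//it can also be driven directly:
//
//  typedef rbtree_best_fit<null_mutex_family, offset_ptr<void> > algo_t;
//  static char buffer[4096];
//  algo_t *algo = ::new(buffer) algo_t(sizeof(buffer), 0);
//  void *p = algo->allocate(100);
//  algo->deallocate(p);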

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
{
   size_type uint_this             = (std::size_t)this_ptr;
   size_type main_hdr_end          = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   size_type aligned_main_hdr_end  = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
   size_type block1_off = aligned_main_hdr_end - uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}
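
//Worked example (hypothetical addresses): with this == 0x1000,
//sizeof(rbtree_best_fit) == 0x58, extra_hdr_bytes == 0 and Alignment == 16,
//main_hdr_end == 0x1058 rounds up to aligned_main_hdr_end == 0x1060, so the
//first block starts block1_off == 0x60 bytes past the algorithm object.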

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, size_type segment_size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block and the "end" node
   block_ctrl *first_big_block = ::new(addr, boost_container_new_t())block_ctrl;
   first_big_block->m_size = segment_size/Alignment - EndCtrlBlockUnits;
   BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);

   //The "end" node is just a node of size 0 with the "end" bit set
   block_ctrl *end_block = static_cast<block_ctrl*>
      (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);

   //This will overwrite the prev part of the "end" node
   priv_mark_as_free_block (first_big_block);
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
   #else
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment;
   #endif
   end_block->m_allocated = 1;
   first_big_block->m_prev_allocated = 1;

   BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
   BOOST_ASSERT(priv_prev_block(end_block) == first_big_block);
   BOOST_ASSERT(priv_first_block() == first_big_block);
   BOOST_ASSERT(priv_end_block() == end_block);

   //Some checks to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the size fields of the header are placed before the rbtree hook
   BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
                < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
   //Insert it in the intrusive containers
   m_header.m_imultiset.insert(*first_big_block);
}
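
//Resulting segment layout (sketch):
//
//   addr                                          addr + segment_size
//   |first_big_block (free) .....................|end node (SizeHolder)|
//
//first_big_block->m_prev_size and end_block->m_size both record the
//distance between the two blocks in Alignment units, so either one can be
//reached from the other; the end node is permanently marked allocated so
//no block ever tries to coalesce past the segment boundary.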

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block()
{
   size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   return reinterpret_cast<block_ctrl *>(reinterpret_cast<char*>(this) + block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_end_block()
{
   size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   const size_type original_first_block_size = m_header.m_size/Alignment*Alignment - block1_off/Alignment*Alignment - EndCtrlBlockBytes;
   block_ctrl *end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off + original_first_block_size);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated       = 0;
   m_header.m_size            = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                    reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //The last and first blocks are special:
   //new_end_block->m_size & first_block->m_prev_size store the absolute
   //distance between them
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size      = (reinterpret_cast<char*>(first_block) -
                                 reinterpret_cast<char*>(new_end_block))/Alignment;
   #else
   new_end_block->m_size      = (reinterpret_cast<char*>(new_end_block) -
                                 reinterpret_cast<char*>(first_block))/Alignment;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
                        reinterpret_cast<char*>(new_block))/Alignment;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
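
//Illustrative use (sketch): grow() does not acquire memory itself; the
//owner first enlarges the underlying mapping, then reports the new tail:
//
//  //...remap/extend the segment by 'extra' bytes, then...
//  algo->grow(extra);   //tail becomes a free block, reinserted in the tree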

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //block_ctrl *old_end_block = priv_end_block(first_block);
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and last block
   if(priv_next_block(first_block) == old_end_block){
      //If so, check if we can allocate memory
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If not, return, we can't shrink
      if(!unique_buffer)
         return;
      //If we can, mark the position just after the new allocation as the new end
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If memory is allocated, check if the last block is allocated
      if(priv_is_prev_allocated(old_end_block))
         return;
      //If not, the last block is the free block preceding the end block
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase block from the free tree, since we will repurpose it
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                       reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write new end block attributes
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = first_block->m_prev_size =
      (reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment;
   #else
   new_end_block->m_size = first_block->m_prev_size =
      (reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size()  const
{  return m_header.m_size;  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory()  const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (size_type extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           MinBlockUnits + EndCtrlBlockUnits)*Alignment;
}
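
//Worked example (using the hypothetical 64-bit figures from above, with
//sizeof(rbtree_best_fit) == 88 and extra_hdr_bytes == 0): ceil_units(88) == 6,
//MinBlockUnits == 3 and EndCtrlBlockUnits == 1, so the smallest viable
//segment would be (6 + 0 + 3 + 1)*16 == 160 bytes.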

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
       (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
      && m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   size_type free_memory = 0;

   //Iterate through all free blocks obtaining their size
   for(; ib != ie; ++ib){
      free_memory += (size_type)ib->m_size*Alignment;
      algo_impl_t::assert_alignment(&*ib);
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check that allocated bytes are not greater than the segment size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Check that free bytes are not greater than the segment size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(size_type nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type ignore_recvd = nbytes;
   void *ignore_reuse = 0;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                       size_type &prefer_in_recvd_out_size, T *&reuse)
{
   void* raw_reuse = reuse;
   void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   reuse = static_cast<T*>(raw_reuse);
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::container_detail::alignment_of<T>::value));
   return static_cast<T*>(ret);
}
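
//Illustrative sketch (hypothetical names: old_ptr, min_objects,
//preferred_objects): try to grow an existing array in place and fall back
//to a fresh allocation if neither neighbour has room. Sizes are counted in
//objects of type T:
//
//  size_type received = preferred_objects;   //in: preferred, out: received
//  int *reuse = old_ptr;
//  int *p = algo->allocation_command
//     ( boost::interprocess::expand_fwd | boost::interprocess::allocate_new
//     , min_objects, received, reuse);
//  //reuse != 0  -> old_ptr was expanded to 'received' objects
//  //reuse == 0  -> p points to newly allocated memory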

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
                           size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_objects;
   if(!sizeof_object)
      return reuse_ptr = 0, static_cast<void*>(0);
   if(command & boost::interprocess::try_shrink_in_place){
      if(!reuse_ptr)   return static_cast<void*>(0);
      const bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
      prefer_in_recvd_out_objects /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
   }
}


template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                            size_type &prefer_in_recvd_out_size,
                            void *&reuse_ptr, size_type sizeof_object)
{
   void* ret;
   size_type const preferred_size = prefer_in_recvd_out_size;
   size_type const max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type p_size = preferred_size*sizeof_object;
   size_type r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //We need no synchronization since this block's size is not going
   //to be modified by anyone else
   //Obtain the real size of the block
   return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all free blocks obtaining their size
   while(ib != ie){
      //Just clear the memory part reserved for the user
      volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
      size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
      while(s--){
         *ptr++ = 0;
      }

      //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
      //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
      //           , 0
      //           , ib->m_size*Alignment - BlockCtrlBytes);
      ++ib;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Free old previous buffer
            new_block->m_size =
               AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            prev_block->m_size = (reinterpret_cast<char*>(new_block) -
                                  reinterpret_cast<char*>(prev_block))/Alignment;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = prev_block->m_size + reuse->m_size;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, chain);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
                ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units to request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);

   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      if(it != m_header.m_imultiset.begin()&&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }


   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
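
//Best-fit selection sketch: lower_bound on the size-ordered multiset returns
//the smallest free block able to hold preferred_units; if no block is that
//large, the largest block (the element just before end()) is still accepted
//when it can satisfy limit_units, trading the preferred size for success.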

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (reinterpret_cast<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
      priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(size_type userbytes)
{
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}
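
//Worked example (hypothetical 64-bit figures: Alignment == 16,
//AllocatedCtrlUnits == 1, UsableByPreviousChunk == 8): a 100-byte request
//rounds (100 - 8) up to 96 bytes == 6 units, plus 1 header unit == 7 units,
//i.e. 112 bytes of segment space; the 8 subtracted bytes live in the next
//block's m_prev_size slot, which this allocation is allowed to use.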
Chris@16 1023
Chris@16 1024 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
Chris@16 1025 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
Chris@101 1026 priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
Chris@16 1027 {
Chris@101 1028 size_type const preferred_size = prefer_in_recvd_out_size;
Chris@16 1029 //Obtain the real size of the block
Chris@16 1030 block_ctrl *block = priv_get_block(ptr);
Chris@16 1031 size_type old_block_units = block->m_size;
Chris@16 1032
Chris@16 1033 //The block must be marked as allocated and the sizes must be equal
Chris@16 1034 BOOST_ASSERT(priv_is_allocated_block(block));
Chris@16 1035
Chris@16 1036 //Put this to a safe value
Chris@101 1037 prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
Chris@101 1038 if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
Chris@16 1039 return true;
Chris@16 1040
Chris@16 1041 //Now translate it to Alignment units
Chris@16 1042 const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
Chris@16 1043 const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
Chris@16 1044
Chris@16 1045 //Some parameter checks
Chris@16 1046 BOOST_ASSERT(min_user_units <= preferred_user_units);
Chris@16 1047
Chris@16 1048 block_ctrl *next_block;
Chris@16 1049
Chris@16 1050 if(priv_is_allocated_block(next_block = priv_next_block(block))){
Chris@101 1051 return prefer_in_recvd_out_size >= min_size;
Chris@16 1052 }
Chris@16 1053 algo_impl_t::assert_alignment(next_block);
Chris@16 1054
Chris@16 1055 //Is "block" + "next_block" big enough?
Chris@16 1056 const size_type merged_units = old_block_units + (size_type)next_block->m_size;
Chris@16 1057
Chris@16 1058 //Now get the expansion size
Chris@16 1059 const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
Chris@16 1060
Chris@16 1061 if(merged_user_units < min_user_units){
Chris@101 1062 prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
Chris@16 1063 return false;
Chris@16 1064 }
Chris@16 1065
Chris@16 1066 //Now get the maximum size the user can allocate
Chris@16 1067 size_type intended_user_units = (merged_user_units < preferred_user_units) ?
Chris@16 1068 merged_user_units : preferred_user_units;
Chris@16 1069
Chris@16 1070 //These are total units of the merged block (supposing the next block can be split)
Chris@16 1071 const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
Chris@16 1072
Chris@16 1073 //Check if we can split the next one in two parts
Chris@16 1074 if((merged_units - intended_units) >= BlockCtrlUnits){
Chris@16 1075 //This block is bigger than needed, split it in
Chris@16 1076 //two blocks, the first one will be merged and
Chris@16 1077 //the second's size will be the remaining space
Chris@16 1078 BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
Chris@16 1079 const size_type rem_units = merged_units - intended_units;
Chris@16 1080
Chris@16 1081 //Check if we we need to update the old next block in the free blocks tree
Chris@16 1082 //If the new size fulfills tree invariants, we just need to replace the node
Chris@16 1083 //(the block start has been displaced), otherwise erase() + insert().
Chris@16 1084 //
Chris@16 1085 //This fixup must be done in two parts, because the new next block might
Chris@16 1086 //overwrite the tree hook of the old next block. So we first erase the
Chris@16 1087 //old if needed and we'll insert the new one after creating the new next
Chris@16 1088 imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
Chris@16 1089 const bool size_invariants_broken =
Chris@16 1090 (next_block->m_size - rem_units ) < BlockCtrlUnits ||
Chris@16 1091 (old_next_block_it != m_header.m_imultiset.begin() &&
Chris@16 1092 (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
Chris@16 1093 if(size_invariants_broken){
Chris@16 1094 m_header.m_imultiset.erase(old_next_block_it);
Chris@16 1095 }
Chris@16 1096 //This is the remaining block
Chris@101 1097 block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
Chris@101 1098 (reinterpret_cast<char*>(block) + intended_units*Alignment), boost_container_new_t())block_ctrl;
Chris@16 1099 rem_block->m_size = rem_units;
Chris@16 1100 algo_impl_t::assert_alignment(rem_block);
Chris@16 1101 BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
Chris@16 1102 priv_mark_as_free_block(rem_block);
Chris@16 1103
Chris@16 1104 //Now the second part of the fixup
Chris@16 1105 if(size_invariants_broken)
Chris@16 1106 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
Chris@16 1107 else
Chris@16 1108 m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);
Chris@16 1109
Chris@16 1110 //Write the new length
Chris@16 1111 block->m_size = intended_user_units + AllocatedCtrlUnits;
Chris@16 1112 BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
Chris@16 1113 m_header.m_allocated += (intended_units - old_block_units)*Alignment;
Chris@16 1114 }
Chris@16 1115 //There is no free space to create a new node: just merge both blocks
Chris@16 1116 else{
Chris@16 1117 //Now we have to update the data in the tree
Chris@16 1118 m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
Chris@16 1119
Chris@16 1120 //Write the new length
Chris@16 1121 block->m_size = merged_units;
Chris@16 1122 BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
Chris@16 1123 m_header.m_allocated += (merged_units - old_block_units)*Alignment;
Chris@16 1124 }
Chris@16 1125 priv_mark_as_allocated_block(block);
Chris@101 1126 prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
Chris@16 1127 return true;
Chris@16 1128 }
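
//A worked example of the size accounting above (a sketch with assumed
//numbers, not values mandated by this header): suppose Alignment == 8,
//AllocatedCtrlUnits == 2, BlockCtrlUnits == 3 and UsableByPreviousChunk == 8.
//Expanding a block of old_block_units == 10 into a free neighbour so that
//merged_units == 20 and intended_units == 15 leaves
//   rem_units = 20 - 15 = 5 >= BlockCtrlUnits
//so the split branch runs, and the caller is finally told
//   prefer_in_recvd_out_size = (15 - 2)*8 + 8 = 112 bytes.
//Note also why the tree fixup is split around the placement-new of
//rem_block: constructing the new control block may overwrite the old next
//block's tree hook, so the erase (when needed) must happen before the new
//node is built and inserted.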
Chris@16 1129
Chris@16 1130 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1131 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
Chris@16 1132 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
Chris@16 1133 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
Chris@16 1134 {
Chris@16 1135 BOOST_ASSERT(!ptr->m_prev_allocated);
Chris@16 1136 return reinterpret_cast<block_ctrl *>
Chris@16 1137 (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
Chris@16 1138 }
Chris@16 1139
Chris@16 1140
Chris@16 1141
Chris@16 1142 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1143 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
Chris@16 1144 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
Chris@16 1145 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
Chris@16 1146 {
Chris@16 1147 //The first block's logic is different from that of the other blocks: it stores in m_prev_size
Chris@16 1148 //the absolute distance to the end block
Chris@16 1149 BOOST_ASSERT(first_segment_block->m_prev_allocated);
Chris@16 1150 block_ctrl *end_block = reinterpret_cast<block_ctrl *>
Chris@16 1151 (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
Chris@16 1152 (void)end_block;
Chris@16 1153 BOOST_ASSERT(end_block->m_allocated == 1);
Chris@16 1154 BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
Chris@16 1155 BOOST_ASSERT(end_block > first_segment_block);
Chris@16 1156 return end_block;
Chris@16 1157 }
Chris@16 1158
Chris@16 1159 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1160 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
Chris@16 1161 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
Chris@16 1162 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
Chris@16 1163 {
Chris@16 1164 //The first block's logic is different from that of the other blocks: it stores in m_prev_size
Chris@16 1165 //the absolute distance to the end block
Chris@16 1166 BOOST_ASSERT(end_segment_block->m_allocated);
Chris@16 1167 block_ctrl *first_block = reinterpret_cast<block_ctrl *>
Chris@16 1168 (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
Chris@16 1169 (void)first_block;
Chris@16 1170 BOOST_ASSERT(first_block->m_prev_allocated == 1);
Chris@16 1171 BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
Chris@16 1172 BOOST_ASSERT(end_segment_block > first_block);
Chris@16 1173 return first_block;
Chris@16 1174 }
Chris@16 1175
Chris@16 1176
Chris@16 1177 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1178 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
Chris@16 1179 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
Chris@16 1180 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
Chris@16 1181 {
Chris@16 1182 return reinterpret_cast<block_ctrl *>
Chris@16 1183 (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
Chris@16 1184 }
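
//Navigation summary for the helpers above (an illustrative sketch of the
//boundary-tag layout; block sizes are examples, not fixed values):
//
//   |<- prev block (free) ->|<------ block ------>|<--- next block --->|
//   [hdr|...................][hdr|................][hdr|...............]
//    ^                        ^                     ^
//    priv_prev_block(b)       b                     priv_next_block(b)
//
//priv_next_block advances m_size units; priv_prev_block walks back
//m_prev_size units, which is only valid while the previous block is free
//(hence the !m_prev_allocated assertion). The first block reuses its
//m_prev_size field to record the distance to the sentinel end block, which
//is what priv_end_block and priv_first_block exploit in both directions.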
Chris@16 1185
Chris@16 1186 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1187 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
Chris@16 1188 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
Chris@16 1189 {
Chris@16 1190 bool allocated = block->m_allocated != 0;
Chris@16 1191 #ifndef NDEBUG
Chris@16 1192 if(block != priv_end_block()){
Chris@16 1193 block_ctrl *next_block = reinterpret_cast<block_ctrl *>
Chris@16 1194 (reinterpret_cast<char*>(block) + block->m_size*Alignment);
Chris@16 1195 bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
Chris@16 1196 (void)next_block_prev_allocated;
Chris@16 1197 BOOST_ASSERT(allocated == next_block_prev_allocated);
Chris@16 1198 }
Chris@16 1199 #endif
Chris@16 1200 return allocated;
Chris@16 1201 }
Chris@16 1202
Chris@16 1203 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1204 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
Chris@16 1205 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
Chris@16 1206 {
Chris@16 1207 if(block->m_prev_allocated){
Chris@16 1208 return true;
Chris@16 1209 }
Chris@16 1210 else{
Chris@16 1211 #ifndef NDEBUG
Chris@16 1212 if(block != priv_first_block()){
Chris@16 1213 block_ctrl *prev = priv_prev_block(block);
Chris@16 1214 (void)prev;
Chris@16 1215 BOOST_ASSERT(!prev->m_allocated);
Chris@101 1216 BOOST_ASSERT(prev->m_size == block->m_prev_size);
Chris@16 1217 }
Chris@16 1218 #endif
Chris@16 1219 return false;
Chris@16 1220 }
Chris@16 1221 }
Chris@16 1222
Chris@16 1223 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1224 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
Chris@16 1225 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
Chris@16 1226 {
Chris@16 1227 block->m_allocated = 1;
Chris@16 1228 reinterpret_cast<block_ctrl *>
Chris@16 1229 (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
Chris@16 1230 }
Chris@16 1231
Chris@16 1232 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1233 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
Chris@16 1234 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
Chris@16 1235 {
Chris@16 1236 block->m_allocated = 0;
Chris@16 1237 block_ctrl *next_block = priv_next_block(block);
Chris@16 1238 next_block->m_prev_allocated = 0;
Chris@16 1239 next_block->m_prev_size = block->m_size;
Chris@16 1240 }
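
//The two marking helpers above keep the boundary tags consistent: a
//block's m_allocated bit is always mirrored in the next block's
//m_prev_allocated bit, and a free block additionally publishes its size in
//the next block's m_prev_size so that priv_prev_block can coalesce
//backwards. A minimal sketch of the invariant they maintain, written over
//this header's own fields:
//
//   BOOST_ASSERT(block->m_allocated == priv_next_block(block)->m_prev_allocated);
//   if(!block->m_allocated)
//      BOOST_ASSERT(priv_next_block(block)->m_prev_size == block->m_size);
//
//priv_is_allocated_block and priv_is_prev_allocated re-check exactly these
//relations in debug builds.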
Chris@16 1241
Chris@16 1242 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
Chris@16 1243 void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
Chris@16 1244 (size_type nunits
Chris@16 1245 ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
Chris@16 1246 ,size_type &received_size)
Chris@16 1247 {
Chris@16 1248 size_type upper_nunits = nunits + BlockCtrlUnits;
Chris@16 1249 imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
Chris@16 1250 algo_impl_t::assert_alignment(block);
Chris@16 1251
Chris@16 1252 if (block->m_size >= upper_nunits){
Chris@16 1253 //This block is bigger than needed: split it into
Chris@16 1254 //two blocks; the first's size will be "nunits" and
Chris@16 1255 //the second's the remaining "block_old_size - nunits"
Chris@16 1256 size_type block_old_size = block->m_size;
Chris@16 1257 block->m_size = nunits;
Chris@16 1258 BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
Chris@16 1259
Chris@16 1260 //This is the remaining block
Chris@101 1261 block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
Chris@101 1262 (reinterpret_cast<char*>(block) + Alignment*nunits), boost_container_new_t())block_ctrl;
Chris@16 1263 algo_impl_t::assert_alignment(rem_block);
Chris@16 1264 rem_block->m_size = block_old_size - nunits;
Chris@16 1265 BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
Chris@16 1266 priv_mark_as_free_block(rem_block);
Chris@16 1267
Chris@16 1268 imultiset_iterator it_hint;
Chris@16 1269 if(it_old == m_header.m_imultiset.begin()
Chris@101 1270 || (--imultiset_iterator(it_old))->m_size <= rem_block->m_size){
Chris@16 1271 //option a: slow but secure
Chris@16 1272 //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
Chris@16 1273 //option b: Construct an empty node and swap
Chris@16 1274 //Imultiset::init_node(*rem_block);
Chris@16 1275 //block->swap_nodes(*rem_block);
Chris@16 1276 //option c: replace the node directly
Chris@16 1277 m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
Chris@16 1278 }
Chris@16 1279 else{
Chris@16 1280 //Now we have to update the data in the tree
Chris@16 1281 m_header.m_imultiset.erase(it_old);
Chris@16 1282 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
Chris@16 1283 }
Chris@16 1284
Chris@16 1285 }
Chris@16 1286 else if (block->m_size >= nunits){
Chris@16 1287 m_header.m_imultiset.erase(it_old);
Chris@16 1288 }
Chris@16 1289 else{
Chris@16 1290 BOOST_ASSERT(0);
Chris@16 1291 return 0;
Chris@16 1292 }
Chris@16 1293 //We need the block_ctrl header for deallocation bookkeeping, so
Chris@16 1294 //return only the memory the user can overwrite
Chris@16 1295 m_header.m_allocated += (size_type)block->m_size*Alignment;
Chris@16 1296 received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
Chris@16 1297
Chris@16 1298 //Mark the block as allocated
Chris@16 1299 priv_mark_as_allocated_block(block);
Chris@16 1300
Chris@16 1301 //Clear the memory occupied by the tree hook, since this won't be
Chris@16 1302 //cleared with zero_free_memory
Chris@16 1303 TreeHook *t = static_cast<TreeHook*>(block);
Chris@16 1304 //Just clear the memory part reserved for the user
Chris@16 1305 std::size_t tree_hook_offset_in_block = (char*)t - (char*)block;
Chris@16 1306 //volatile char *ptr =
Chris@16 1307 char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
Chris@16 1308 const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
Chris@16 1309 std::memset(ptr, 0, s);
Chris@16 1310 this->priv_next_block(block)->m_prev_size = 0;
Chris@16 1311 return priv_get_user_buffer(block);
Chris@16 1312 }
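
//How this routine is reached in practice (a minimal usage sketch; the
//segment name "MySharedMemory" and the sizes are illustrative only).
//rbtree_best_fit is the default memory algorithm of managed_shared_memory,
//so the raw allocate()/deallocate() calls below end up in
//priv_check_and_allocate and priv_deallocate respectively:
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//
//   int main()
//   {
//      using namespace boost::interprocess;
//      shared_memory_object::remove("MySharedMemory");
//      managed_shared_memory segment(create_only, "MySharedMemory", 65536);
//      void *p = segment.allocate(1024); //best-fit search, then split
//      segment.deallocate(p);            //coalesce with free neighbours
//      shared_memory_object::remove("MySharedMemory");
//      return 0;
//   }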
Chris@16 1313
Chris@16 1314 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
Chris@16 1315 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
Chris@16 1316 {
Chris@16 1317 if(!addr) return;
Chris@16 1318 //-----------------------
Chris@16 1319 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
Chris@16 1320 //-----------------------
Chris@16 1321 return this->priv_deallocate(addr);
Chris@16 1322 }
Chris@16 1323
Chris@16 1324 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
Chris@16 1325 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
Chris@16 1326 {
Chris@16 1327 if(!addr) return;
Chris@16 1328
Chris@16 1329 block_ctrl *block = priv_get_block(addr);
Chris@16 1330
Chris@16 1331 //The block must be marked as allocated
Chris@16 1332 BOOST_ASSERT(priv_is_allocated_block(block));
Chris@16 1333
Chris@16 1334 //Check if alignment and block size are right
Chris@16 1335 algo_impl_t::assert_alignment(addr);
Chris@16 1336
Chris@16 1337 size_type block_old_size = Alignment*(size_type)block->m_size;
Chris@16 1338 BOOST_ASSERT(m_header.m_allocated >= block_old_size);
Chris@16 1339
Chris@16 1340 //Update used memory count
Chris@16 1341 m_header.m_allocated -= block_old_size;
Chris@16 1342
Chris@16 1343 //The block to insert in the tree
Chris@16 1344 block_ctrl *block_to_insert = block;
Chris@16 1345
Chris@16 1346 //Get the next block
Chris@101 1347 block_ctrl *const next_block = priv_next_block(block);
Chris@101 1348 const bool merge_with_prev = !priv_is_prev_allocated(block);
Chris@101 1349 const bool merge_with_next = !priv_is_allocated_block(next_block);
Chris@16 1350
Chris@16 1351 //Merge logic. First just update block sizes, then fix the free blocks tree
Chris@16 1352 if(merge_with_prev || merge_with_next){
Chris@16 1353 //Merge if the previous is free
Chris@16 1354 if(merge_with_prev){
Chris@16 1355 //Get the previous block
Chris@101 1356 block_to_insert = priv_prev_block(block);
Chris@101 1357 block_to_insert->m_size += block->m_size;
Chris@101 1358 BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
Chris@16 1359 }
Chris@16 1360 //Merge if the next is free
Chris@16 1361 if(merge_with_next){
Chris@16 1362 block_to_insert->m_size += next_block->m_size;
Chris@16 1363 BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
Chris@101 1364 const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
Chris@101 1365 if(merge_with_prev){
Chris@101 1366 m_header.m_imultiset.erase(next_it);
Chris@101 1367 }
Chris@101 1368 else{
Chris@101 1369 m_header.m_imultiset.replace_node(next_it, *block_to_insert);
Chris@101 1370 }
Chris@16 1371 }
Chris@16 1372
Chris@16 1373 //Now try to shortcut erasure + insertion (O(log(N))) with
Chris@16 1374 //an O(1) operation if merging does not alter tree positions
Chris@101 1375 const imultiset_iterator block_to_check_it = Imultiset::s_iterator_to(*block_to_insert);
Chris@101 1376 imultiset_const_iterator next_to_check_it(block_to_check_it), end_it(m_header.m_imultiset.end());
Chris@101 1377
Chris@101 1378 if(++next_to_check_it != end_it && block_to_insert->m_size > next_to_check_it->m_size){
Chris@101 1379 //Block is bigger than next, so move it
Chris@101 1380 m_header.m_imultiset.erase(block_to_check_it);
Chris@101 1381 m_header.m_imultiset.insert(end_it, *block_to_insert);
Chris@16 1382 }
Chris@101 1383 else{
Chris@101 1384 //Block size increment didn't violate the tree invariants, so there is nothing to fix
Chris@16 1385 }
Chris@16 1386 }
Chris@16 1387 else{
Chris@16 1388 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
Chris@16 1389 }
Chris@16 1390 priv_mark_as_free_block(block_to_insert);
Chris@16 1391 }
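
//Coalescing cases handled above (an illustrative summary, with A =
//allocated and F = free neighbours of the block being deallocated):
//
//   A [block] A  ->  plain insert with a begin() hint
//   F [block] A  ->  grow the previous free block in place
//   A [block] F  ->  absorb the next block; replace_node reuses its slot
//   F [block] F  ->  grow the previous block and erase the next's node
//
//In every case the final size-ordering check tries to avoid the full
//O(log(N)) erase + insert when the enlarged block still satisfies the
//multiset's size ordering relative to its successor.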
Chris@16 1392
Chris@101 1393 #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
Chris@16 1394
Chris@16 1395 } //namespace interprocess {
Chris@16 1396 } //namespace boost {
Chris@16 1397
Chris@16 1398 #include <boost/interprocess/detail/config_end.hpp>
Chris@16 1399
Chris@16 1400 #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP