//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
// interprocess/detail
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container
#include <boost/container/detail/multiallocation_chain.hpp>
// container/detail
#include <boost/container/detail/placement_new.hpp>
// move/detail
#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
// intrusive
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
// other boost
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
// std
#include <climits>
#include <cstring>

//#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
//Uncomment the macro above to maintain an ABI compatible with the original
//version. The ABI had to be updated to fix compatibility issues when
//sharing shared memory between 32 and 64 bit processes.

//!\file
//!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
//!objects in shared memory. This class is intended as a base class for single segment
//!and multi-segment implementations.

namespace boost {
namespace interprocess {

//!This class implements an algorithm that stores the free nodes in a red-black tree
//!to have logarithmic search/insert times.
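//!
//!Typically this allocator is not instantiated directly: it is selected as the
//!memory algorithm of a managed memory segment. A usage sketch (assuming the
//!usual Interprocess headers and the standard mutex_family/iset_index types):
//!\code
//!   #include <boost/interprocess/managed_shared_memory.hpp>
//!
//!   using namespace boost::interprocess;
//!
//!   //Managed shared memory whose free blocks are handled by rbtree_best_fit
//!   typedef basic_managed_shared_memory
//!      < char
//!      , rbtree_best_fit<mutex_family>
//!      , iset_index
//!      > my_managed_shared_memory;
//!\endcode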
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type                   block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type                         char_ptr;

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer>  multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::container_detail::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:

   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type              TreeHook;

   struct SizeHolder
   {
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_prev_size :  sizeof(size_type)*CHAR_BIT;
      size_type m_size      :  sizeof(size_type)*CHAR_BIT - 2;
      size_type m_prev_allocated :  1;
      size_type m_allocated :  1;
   };

   //!Block control structure
   struct block_ctrl
      :  public SizeHolder, public TreeHook
   {
      block_ctrl()
      {  this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0;  }

      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size < b.m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size == b.m_size;  }
   };

   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };

   //!Shared mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type                 mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type          Imultiset;

   typedef typename Imultiset::iterator                     imultiset_iterator;
   typedef typename Imultiset::const_iterator               imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null mutex_type
   struct header_t : public mutex_type
   {
      Imultiset            m_imultiset;

      //!The extra size required by the segment
      size_type            m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type            m_allocated;
      //!The size of the memory segment
      size_type            m_size;
   }  m_header;
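
   //Layout summary of the structures above: every block starts with a SizeHolder
   //header holding its size, the previous block's size and two "allocated" bits.
   //Allocated blocks store only that header before the user data, while free
   //blocks also embed the intrusive TreeHook so they can live in m_imultiset.
   //This is why a block needs at least BlockCtrlUnits units: it must be able to
   //become a free (tree-linked) block again when deallocated.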

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
   //!offset that the allocator should not use at all.
   rbtree_best_fit           (size_type size, size_type extra_hdr_bytes);

   //!Destructor.
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is no more memory
   void* allocate             (size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void   deallocate          (void *addr);

   //!Returns the size of the memory segment
   size_type get_size()  const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory()  const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases the managed memory by
   //!extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true on success
   bool check_sanity();

   template<class T>
   T * allocation_command  (boost::interprocess::allocation_type command, size_type limit_size,
                            size_type &prefer_in_recvd_out_size, T *&reuse);

   void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
                                  size_type &prefer_in_recvd_out_size,
                                  void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated and pointed to by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is no more memory.
   //!Alignment must be power of 2
   void* allocate_aligned     (size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                                 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);


   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Get a pointer of the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Get a pointer of the "first" block from the end block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Get pointer of the previous block (previous block must be free)
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Get a pointer to the next block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Checks if this block is allocated (not free)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks a newly created block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr); }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::container_detail::alignment_of
                  < ::boost::container::container_detail::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_STATIC_ASSERT((Alignment >= ::boost::container::container_detail::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes  = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits  = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes   = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits   = EndCtrlBlockBytes/Alignment;
   static const size_type MinBlockUnits       = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the maximum alignment is power of two
   BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   public:
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
{
   size_type uint_this            = (std::size_t)this_ptr;
   size_type main_hdr_end         = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
   size_type block1_off           = aligned_main_hdr_end - uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}
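
//Example with hypothetical numbers: if "this" is at offset 0, the main header
//(sizeof(rbtree_best_fit) + extra_hdr_bytes) ends at byte 184 and Alignment is
//16, block1_off is 184 rounded up to 192, so the first block starts 192 bytes
//after the segment base.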

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, size_type segment_size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block and the "end" node
   block_ctrl *first_big_block = ::new(addr, boost_container_new_t())block_ctrl;
   first_big_block->m_size = segment_size/Alignment - EndCtrlBlockUnits;
   BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);

   //The "end" node is just a node of size 0 with the "end" bit set
   block_ctrl *end_block = static_cast<block_ctrl*>
      (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);

   //This will overwrite the prev part of the "end" node
   priv_mark_as_free_block (first_big_block);
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
   #else
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment;
   #endif
   end_block->m_allocated = 1;
   first_big_block->m_prev_allocated = 1;

   BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
   BOOST_ASSERT(priv_prev_block(end_block) == first_big_block);
   BOOST_ASSERT(priv_first_block() == first_big_block);
   BOOST_ASSERT(priv_end_block() == end_block);

   //Some checks to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the size fields of the header are placed before the rbtree hook
   BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
       < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
   //Insert it in the intrusive containers
   m_header.m_imultiset.insert(*first_big_block);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_first_block()
{
   size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   return reinterpret_cast<block_ctrl *>(reinterpret_cast<char*>(this) + block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_end_block()
{
   size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   const size_type original_first_block_size = m_header.m_size/Alignment*Alignment - block1_off/Alignment*Alignment - EndCtrlBlockBytes;
   block_ctrl *end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off + original_first_block_size);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated       = 0;
   m_header.m_size            = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                    reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //the last and first block are special:
   //new_end_block->m_size & first_block->m_prev_size store the absolute value
   //between them
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size      = (reinterpret_cast<char*>(first_block) -
                                 reinterpret_cast<char*>(new_end_block))/Alignment;
   #else
   new_end_block->m_size      = (reinterpret_cast<char*>(new_end_block) -
                                 reinterpret_cast<char*>(first_block))/Alignment;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
                        reinterpret_cast<char*>(new_block))/Alignment;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //block_ctrl *old_end_block = priv_end_block(first_block);
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;
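
   //Two cases follow. If the only thing between the first block and the end
   //block is a single free block, that free space is temporarily allocated so
   //the segment can be cut right after that allocation. Otherwise we can only
   //shrink when the block touching the end node is free.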
   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and last block
   if(priv_next_block(first_block) == old_end_block){
      //If so check if we can allocate memory
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If not, return, we can't shrink
      if(!unique_buffer)
         return;
      //If we can, mark the position just after the new allocation as the new end
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If memory is allocated, check if the block touching the end is allocated
      if(priv_is_prev_allocated(old_end_block))
         return;
      //If it's free, that block marks the new end of the segment
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase the block from the free tree, since we will modify it
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                       reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write new end block attributes
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = first_block->m_prev_size =
      (reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment;
   #else
   new_end_block->m_size = first_block->m_prev_size =
      (reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size()  const
{  return m_header.m_size;  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory()  const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (size_type extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           MinBlockUnits + EndCtrlBlockUnits)*Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
       (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
      && m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
    check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   size_type free_memory = 0;

   //Iterate through all blocks obtaining their size
   for(; ib != ie; ++ib){
      free_memory += (size_type)ib->m_size*Alignment;
      algo_impl_t::assert_alignment(&*ib);
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Check free bytes are less than size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(size_type nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type ignore_recvd = nbytes;
   void *ignore_reuse = 0;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command  (boost::interprocess::allocation_type command, size_type limit_size,
                        size_type &prefer_in_recvd_out_size, T *&reuse)
{
   void* raw_reuse = reuse;
   void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   reuse = static_cast<T*>(raw_reuse);
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::container_detail::alignment_of<T>::value));
   return static_cast<T*>(ret);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command  (boost::interprocess::allocation_type command, size_type limit_objects,
                            size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_objects;
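   //This function works in "objects" instead of bytes: limits and preferences
   //are multiplied by sizeof_object before calling the byte-based internals,
   //and the received size is divided back into objects before returning.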
   if(!sizeof_object)
      return reuse_ptr = 0, static_cast<void*>(0);
   if(command & boost::interprocess::try_shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      const bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
      prefer_in_recvd_out_objects /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
   }
}


template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                            size_type &prefer_in_recvd_out_size,
                            void *&reuse_ptr, size_type sizeof_object)
{
   void* ret;
   size_type const preferred_size = prefer_in_recvd_out_size;
   size_type const max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type p_size = preferred_size*sizeof_object;
   size_type r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //We need no synchronization since this block's size is not going
   //to be modified by anyone else
   //Obtain the real size of the block
   return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all blocks obtaining their size
   while(ib != ie){
      //Just clear the memory part reserved for the user
      volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
      size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
      while(s--){
         *ptr++ = 0;
      }

      //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
      //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
      //           , 0
      //           , ib->m_size*Alignment - BlockCtrlBytes);
      ++ib;
   }
}
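
//priv_expand_both_sides below tries, in order: forward expansion into the next
//free block (via priv_expand), and then backward expansion into the previous
//free block, either splitting it when it is big enough to remain a valid free
//block or swallowing it whole when the merged size keeps backwards_multiple.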

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Free old previous buffer
            new_block->m_size =
               AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            prev_block->m_size = (reinterpret_cast<char*>(new_block) -
                                  reinterpret_cast<char*>(prev_block))/Alignment;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = prev_block->m_size + reuse->m_size;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, chain);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
                ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units to request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);

   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      if(it != m_header.m_imultiset.begin() &&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }


   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (reinterpret_cast<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}
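
//priv_get_block above and priv_get_user_buffer below are exact inverses: the
//pointer handed to the user lives AllocatedCtrlBytes past the block control
//structure.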

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(size_type userbytes)
{
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;

   //Now translate it to Alignment units
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);

   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;

   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;

   if(merged_user_units < min_user_units){
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //These are total units of the merged block (supposing the next block can be split)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;

   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >= BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks, the first one will be merged and
      //the second's size will be the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;

      //Check if we need to update the old next block in the free blocks tree
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old if needed and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      const bool size_invariants_broken =
            (next_block->m_size - rem_units) < BlockCtrlUnits ||
            (old_next_block_it != m_header.m_imultiset.begin() &&
            (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
      if(size_invariants_broken){
         m_header.m_imultiset.erase(old_next_block_it);
      }
      //This is the remaining block
      block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
                     (reinterpret_cast<char*>(block) + intended_units*Alignment), boost_container_new_t())block_ctrl;
      rem_block->m_size = rem_units;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Now the second part of the fixup
      if(size_invariants_broken)
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      else
         m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);

      //Write the new length
      block->m_size = intended_user_units + AllocatedCtrlUnits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the new length
      block->m_size = merged_units;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   BOOST_ASSERT(!ptr->m_prev_allocated);
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
}



template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
{
   //The first block's logic is different from the rest of blocks: stores in m_prev_size
   //the absolute distance to the end block
   BOOST_ASSERT(first_segment_block->m_prev_allocated);
   block_ctrl *end_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
   (void)end_block;
   BOOST_ASSERT(end_block->m_allocated == 1);
   BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
   BOOST_ASSERT(end_block > first_segment_block);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
{
   //The first block's logic is different from the rest of blocks: the end block stores
   //in m_size the absolute distance to the first block
   BOOST_ASSERT(end_segment_block->m_allocated);
   block_ctrl *first_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
   (void)first_block;
   BOOST_ASSERT(first_block->m_prev_allocated == 1);
   BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
   BOOST_ASSERT(end_segment_block > first_block);
   return first_block;
}


template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   bool allocated = block->m_allocated != 0;
   #ifndef NDEBUG
   if(block != priv_end_block()){
      block_ctrl *next_block = reinterpret_cast<block_ctrl *>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
      (void)next_block_prev_allocated;
      BOOST_ASSERT(allocated == next_block_prev_allocated);
   }
   #endif
   return allocated;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   if(block->m_prev_allocated){
      return true;
   }
   else{
      #ifndef NDEBUG
      if(block != priv_first_block()){
         block_ctrl *prev = priv_prev_block(block);
         (void)prev;
         BOOST_ASSERT(!prev->m_allocated);
         BOOST_ASSERT(prev->m_size == block->m_prev_size);
      }
      #endif
      return false;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 1;
   reinterpret_cast<SizeHolder*>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   block_ctrl *next_block = priv_next_block(block);
   next_block->m_prev_allocated = 0;
   next_block->m_prev_size = block->m_size;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units" and
      //the second's size "block->m_size-units"
      size_type block_old_size = block->m_size;
      block->m_size = nunits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
                     (reinterpret_cast<char*>(block) + Alignment*nunits), boost_container_new_t())block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = block_old_size - nunits;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      imultiset_iterator it_hint;
      if(it_old == m_header.m_imultiset.begin()
         || (--imultiset_iterator(it_old))->m_size <= rem_block->m_size){
         //option a: slow but secure
         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
         //option b: Construct an empty node and swap
         //Imultiset::init_node(*rem_block);
         //block->swap_nodes(*rem_block);
         //option c: replace the node directly
         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
      }
      else{
         //Now we have to update the data in the tree
         m_header.m_imultiset.erase(it_old);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      }

   }
   else if (block->m_size >= nunits){
      m_header.m_imultiset.erase(it_old);
   }
   else{
      BOOST_ASSERT(0);
      return 0;
   }
   //We need the block_ctrl header for deallocation, so
   //return only the memory the user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = (char*)t - (char*)block;
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
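   //The previous-size field stored in the next block is only meaningful while
   //this block is free, so clear it now that the block is allocated.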
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *const next_block = priv_next_block(block);
   const bool merge_with_prev   = !priv_is_prev_allocated(block);
   const bool merge_with_next   = !priv_is_allocated_block(next_block);

   //Merge logic. First just update block sizes, then fix free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size += block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size += next_block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         if(merge_with_prev){
            m_header.m_imultiset.erase(next_it);
         }
         else{
            m_header.m_imultiset.replace_node(next_it, *block_to_insert);
         }
      }

      //Now try to shortcut erasure + insertion (O(log(N))) with
      //a O(1) operation if merging does not alter tree positions
      const imultiset_iterator block_to_check_it = Imultiset::s_iterator_to(*block_to_insert);
      imultiset_const_iterator next_to_check_it(block_to_check_it), end_it(m_header.m_imultiset.end());

      if(++next_to_check_it != end_it && block_to_insert->m_size > next_to_check_it->m_size){
         //Block is bigger than next, so move it
         m_header.m_imultiset.erase(block_to_check_it);
         m_header.m_imultiset.insert(end_it, *block_to_insert);
      }
      else{
         //Block size increment didn't violate tree invariants so there is nothing to fix
      }
   }
   else{
      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
   }
   priv_mark_as_free_block(block_to_insert);
}
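
//Deallocation cost note: the merge bookkeeping above is O(1) when the merged
//block keeps its position in the size-ordered multiset, and O(log(N)) when it
//must be erased and reinserted.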

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP