//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2013. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_CONTAINER_DETAIL_ADAPTIVE_NODE_POOL_IMPL_HPP
#define BOOST_CONTAINER_DETAIL_ADAPTIVE_NODE_POOL_IMPL_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif

#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/container/detail/config_begin.hpp>
#include <boost/container/detail/workaround.hpp>

// container
#include <boost/container/container_fwd.hpp>
#include <boost/container/throw_exception.hpp>
// container/detail
#include <boost/container/detail/pool_common.hpp>
#include <boost/container/detail/iterator.hpp>
#include <boost/container/detail/iterator_to_raw_pointer.hpp>
#include <boost/container/detail/math_functions.hpp>
#include <boost/container/detail/mpl.hpp>
#include <boost/container/detail/to_raw_pointer.hpp>
#include <boost/container/detail/type_traits.hpp>
// intrusive
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/slist.hpp>
// other
#include <boost/assert.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <cstddef>

namespace boost {
namespace container {

namespace adaptive_pool_flag {

static const unsigned int none            = 0u;
static const unsigned int align_only      = 1u << 0u;
static const unsigned int size_ordered    = 1u << 1u;
static const unsigned int address_ordered = 1u << 2u;
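//size_ordered and address_ordered may be OR-combined; they select which
//less_func specialization (see below) orders the block container.
//align_only only guarantees block alignment and skips the subblock and
//header-offset bookkeeping used by the other configurations.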

} //namespace adaptive_pool_flag{

namespace container_detail {

template<class size_type>
struct hdr_offset_holder_t
{
   hdr_offset_holder_t(size_type offset = 0)
      : hdr_offset(offset)
   {}
   size_type hdr_offset;
};

template<class SizeType, unsigned int Flags>
struct less_func;

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::none>
{
   static bool less(SizeType, SizeType, const void *, const void *)
   { return true; }
};

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::size_ordered>
{
   static bool less(SizeType ls, SizeType rs, const void *, const void *)
   { return ls < rs; }
};

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::address_ordered>
{
   static bool less(SizeType, SizeType, const void *la, const void *ra)
   { return la < ra; }
};

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered>
{
   static bool less(SizeType ls, SizeType rs, const void *la, const void *ra)
   { return (ls < rs) || ((ls == rs) && (la < ra)); }
};

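//Block bookkeeping policy: in the "ordered" configuration blocks are kept in an
//intrusive multiset sorted by less_func (free-node count and/or address); in the
//unordered configuration they are kept in an intrusive list and are repositioned
//explicitly (push_front/push_back) as they fill up or drain.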
template<class VoidPointer, class SizeType, bool ordered>
struct block_container_traits
{
   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type hook_t;

   template<class T>
   struct container
   {
      typedef typename bi::make_multiset
         <T, bi::base_hook<hook_t>, bi::size_type<SizeType> >::type type;
   };

   template<class Container>
   static void reinsert_was_used(Container &container, typename Container::reference v, bool)
   {
      typedef typename Container::const_iterator const_block_iterator;
      const const_block_iterator this_block
         (Container::s_iterator_to(const_cast<typename Container::const_reference>(v)));
      const_block_iterator next_block(this_block);
      if(++next_block != container.cend()){
         if(this_block->free_nodes.size() > next_block->free_nodes.size()){
            container.erase(this_block);
            container.insert(v);
         }
      }
   }

   template<class Container>
   static void insert_was_empty(Container &container, typename Container::value_type &v, bool)
   {
      container.insert(v);
   }

   template<class Container>
   static void erase_first(Container &container)
   {
      container.erase(container.cbegin());
   }

   template<class Container>
   static void erase_last(Container &container)
   {
      container.erase(--container.cend());
   }
};

template<class VoidPointer, class SizeType>
struct block_container_traits<VoidPointer, SizeType, false>
{
   typedef typename bi::make_list_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::link_mode<bi::normal_link> >::type hook_t;

   template<class T>
   struct container
   {
      typedef typename bi::make_list
         <T, bi::base_hook<hook_t>, bi::size_type<SizeType>, bi::constant_time_size<false> >::type type;
   };

   template<class Container>
   static void reinsert_was_used(Container &container, typename Container::value_type &v, bool is_full)
   {
      if(is_full){
         container.erase(Container::s_iterator_to(v));
         container.push_back(v);
      }
   }

   template<class Container>
   static void insert_was_empty(Container &container, typename Container::value_type &v, bool is_full)
   {
      if(is_full){
         container.push_back(v);
      }
      else{
         container.push_front(v);
      }
   }

   template<class Container>
   static void erase_first(Container &container)
   {
      container.pop_front();
   }

   template<class Container>
   static void erase_last(Container &container)
   {
      container.pop_back();
   }
};

template<class MultiallocationChain, class VoidPointer, class SizeType, unsigned int Flags>
struct adaptive_pool_types
{
   typedef VoidPointer void_pointer;
   static const bool ordered = (Flags & (adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered)) != 0;
   typedef block_container_traits<VoidPointer, SizeType, ordered> block_container_traits_t;
   typedef typename block_container_traits_t::hook_t hook_t;
   typedef hdr_offset_holder_t<SizeType> hdr_offset_holder;
   static const unsigned int order_flags = Flags & (adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered);
   typedef MultiallocationChain free_nodes_t;

   struct block_info_t
      : public hdr_offset_holder,
        public hook_t
   {
      //An intrusive list of free nodes from this block
      free_nodes_t free_nodes;
      friend bool operator <(const block_info_t &l, const block_info_t &r)
      {
         return less_func<SizeType, order_flags>::
            less(l.free_nodes.size(), r.free_nodes.size(), &l , &r);
      }

      friend bool operator ==(const block_info_t &l, const block_info_t &r)
      { return &l == &r; }
   };
   typedef typename block_container_traits_t:: template container<block_info_t>::type block_container_t;
};

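//Chooses the subblock size (a power of two, also used as the allocation alignment):
//start from a first guess large enough to amortize the header offset, then keep
//doubling until the worst-case header + payload overhead drops below
//overhead_percent of the subblock size. Rough example (assumed figures, for
//illustration only): with a 16-byte node, 5% overhead, a 16-byte header offset and
//32 bytes of worst-case subblock overhead, the first guess is 512 bytes (6.25%
//overhead, rejected) and the loop settles on 1024 bytes (~3.1% overhead).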
template<class size_type>
inline size_type calculate_alignment
   ( size_type overhead_percent, size_type real_node_size
   , size_type hdr_size, size_type hdr_offset_size, size_type payload_per_allocation)
{
   //to-do: handle real_node_size != node_size
   const size_type divisor = overhead_percent*real_node_size;
   const size_type dividend = hdr_offset_size*100;
   size_type elements_per_subblock = (dividend - 1)/divisor + 1;
   size_type candidate_power_of_2 =
      upper_power_of_2(elements_per_subblock*real_node_size + hdr_offset_size);
   bool overhead_satisfied = false;
   //Now calculate the worst-case overhead for a subblock
   const size_type max_subblock_overhead = hdr_size + payload_per_allocation;
   while(!overhead_satisfied){
      elements_per_subblock = (candidate_power_of_2 - max_subblock_overhead)/real_node_size;
      const size_type overhead_size = candidate_power_of_2 - elements_per_subblock*real_node_size;
      if(overhead_size*100/candidate_power_of_2 < overhead_percent){
         overhead_satisfied = true;
      }
      else{
         candidate_power_of_2 <<= 1;
      }
   }
   return candidate_power_of_2;
}

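//Given the subblock size/alignment computed above, determines how many aligned
//subblocks form one block so that total bookkeeping overhead stays below
//overhead_percent, and how many nodes that block really holds. The subblock that
//carries the block_info_t header has room for fewer nodes than the others, which
//is why hdr_subblock_elements is tracked separately from elements_per_subblock.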
template<class size_type>
inline void calculate_num_subblocks
   (size_type alignment, size_type real_node_size, size_type elements_per_block
   , size_type &num_subblocks, size_type &real_num_node, size_type overhead_percent
   , size_type hdr_size, size_type hdr_offset_size, size_type payload_per_allocation)
{
   const size_type hdr_subblock_elements = (alignment - hdr_size - payload_per_allocation)/real_node_size;
   size_type elements_per_subblock = (alignment - hdr_offset_size)/real_node_size;
   size_type possible_num_subblock = (elements_per_block - 1)/elements_per_subblock + 1;
   while(((possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements) < elements_per_block){
      ++possible_num_subblock;
   }
   elements_per_subblock = (alignment - hdr_offset_size)/real_node_size;
   bool overhead_satisfied = false;
   while(!overhead_satisfied){
      const size_type total_data = (elements_per_subblock*(possible_num_subblock-1) + hdr_subblock_elements)*real_node_size;
      const size_type total_size = alignment*possible_num_subblock;
      if((total_size - total_data)*100/total_size < overhead_percent){
         overhead_satisfied = true;
      }
      else{
         ++possible_num_subblock;
      }
   }
   num_subblocks = possible_num_subblock;
   real_num_node = (possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements;
}

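//Memory layout (non align_only mode): each block obtained from the segment manager
//is a run of m_num_subblocks contiguous, m_real_block_alignment-sized subblocks.
//Every subblock starts with a hdr_offset_holder whose hdr_offset stores the distance,
//in bytes, to the block_info_t header placed in the last subblock. Nodes are carved
//from the space after each holder, so the block owning any node can be recovered by
//masking the node address down to its subblock boundary and following hdr_offset
//(see priv_block_from_node below).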
template<class SegmentManagerBase, unsigned int Flags>
class private_adaptive_node_pool_impl
{
   //Non-copyable
   private_adaptive_node_pool_impl();
   private_adaptive_node_pool_impl(const private_adaptive_node_pool_impl &);
   private_adaptive_node_pool_impl &operator=(const private_adaptive_node_pool_impl &);
   typedef private_adaptive_node_pool_impl this_type;

   typedef typename SegmentManagerBase::void_pointer void_pointer;
   static const typename SegmentManagerBase::
      size_type PayloadPerAllocation = SegmentManagerBase::PayloadPerAllocation;
   //Flags
   //align_only
   static const bool AlignOnly = (Flags & adaptive_pool_flag::align_only) != 0;
   typedef bool_<AlignOnly> IsAlignOnly;
   typedef true_ AlignOnlyTrue;
   typedef false_ AlignOnlyFalse;
   //size_ordered
   static const bool SizeOrdered = (Flags & adaptive_pool_flag::size_ordered) != 0;
   typedef bool_<SizeOrdered> IsSizeOrdered;
   typedef true_ SizeOrderedTrue;
   typedef false_ SizeOrderedFalse;
   //address_ordered
   static const bool AddressOrdered = (Flags & adaptive_pool_flag::address_ordered) != 0;
   typedef bool_<AddressOrdered> IsAddressOrdered;
   typedef true_ AddressOrderedTrue;
   typedef false_ AddressOrderedFalse;

   public:
   typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain;
   typedef typename SegmentManagerBase::size_type size_type;

   private:
   typedef adaptive_pool_types
      <multiallocation_chain, void_pointer, size_type, Flags> adaptive_pool_types_t;
   typedef typename adaptive_pool_types_t::free_nodes_t free_nodes_t;
   typedef typename adaptive_pool_types_t::block_info_t block_info_t;
   typedef typename adaptive_pool_types_t::block_container_t block_container_t;
   typedef typename adaptive_pool_types_t::block_container_traits_t block_container_traits_t;
   typedef typename block_container_t::iterator block_iterator;
   typedef typename block_container_t::const_iterator const_block_iterator;
   typedef typename adaptive_pool_types_t::hdr_offset_holder hdr_offset_holder;

   static const size_type MaxAlign = alignment_of<void_pointer>::value;
   static const size_type HdrSize = ((sizeof(block_info_t)-1)/MaxAlign+1)*MaxAlign;
   static const size_type HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign;

   public:
   //!Segment manager typedef
   typedef SegmentManagerBase segment_manager_base_type;

   //!Constructor from a segment manager. Never throws
   private_adaptive_node_pool_impl
      ( segment_manager_base_type *segment_mngr_base
      , size_type node_size
      , size_type nodes_per_block
      , size_type max_free_blocks
      , unsigned char overhead_percent
      )
   : m_max_free_blocks(max_free_blocks)
   , m_real_node_size(lcm(node_size, size_type(alignment_of<void_pointer>::value)))
      //Round the size to a power of two value.
      //This is the total memory size (including payload) that we want to
      //allocate from the general-purpose allocator
   , m_real_block_alignment
      (AlignOnly ?
         upper_power_of_2(HdrSize + m_real_node_size*nodes_per_block) :
         calculate_alignment( (size_type)overhead_percent, m_real_node_size
                            , HdrSize, HdrOffsetSize, PayloadPerAllocation))
      //This is the real number of nodes per block
   , m_num_subblocks(0)
   , m_real_num_node(AlignOnly ? (m_real_block_alignment - PayloadPerAllocation - HdrSize)/m_real_node_size : 0)
      //General purpose allocator
   , mp_segment_mngr_base(segment_mngr_base)
   , m_block_container()
   , m_totally_free_blocks(0)
   {
      if(!AlignOnly){
         calculate_num_subblocks
            ( m_real_block_alignment
            , m_real_node_size
            , nodes_per_block
            , m_num_subblocks
            , m_real_num_node
            , (size_type)overhead_percent
            , HdrSize
            , HdrOffsetSize
            , PayloadPerAllocation);
      }
   }

   //!Destructor. Deallocates all allocated blocks. Never throws
   ~private_adaptive_node_pool_impl()
   { this->priv_clear(); }

   size_type get_real_num_node() const
   { return m_real_num_node; }

   //!Returns the segment manager. Never throws
   segment_manager_base_type* get_segment_manager_base()const
   { return container_detail::to_raw_pointer(mp_segment_mngr_base); }

   //!Allocates a single node. Can throw
   void *allocate_node()
   {
      this->priv_invariants();
      //If there are no free nodes we allocate a new block
      if(!m_block_container.empty()){
         //We take the first free node; the multiset can't be empty
         free_nodes_t &free_nodes = m_block_container.begin()->free_nodes;
         BOOST_ASSERT(!free_nodes.empty());
         const size_type free_nodes_count = free_nodes.size();
         void *first_node = container_detail::to_raw_pointer(free_nodes.pop_front());
         if(free_nodes.empty()){
            block_container_traits_t::erase_first(m_block_container);
         }
         m_totally_free_blocks -= static_cast<size_type>(free_nodes_count == m_real_num_node);
         this->priv_invariants();
         return first_node;
      }
      else{
         multiallocation_chain chain;
         this->priv_append_from_new_blocks(1, chain, IsAlignOnly());
         return container_detail::to_raw_pointer(chain.pop_front());
      }
   }

   //!Deallocates the node pointed to by pElem. Never throws
   void deallocate_node(void *pElem)
   {
      this->priv_invariants();
      block_info_t &block_info = *this->priv_block_from_node(pElem);
      BOOST_ASSERT(block_info.free_nodes.size() < m_real_num_node);

      //We put the node at the back of the free node list
      block_info.free_nodes.push_back(void_pointer(pElem));

      //If this is the block's first free node, the block is not yet in the block container
      this->priv_reinsert_block(block_info, block_info.free_nodes.size() == 1);
      this->priv_deallocate_free_blocks(m_max_free_blocks);
      this->priv_invariants();
   }

   //!Allocates n nodes.
   //!Can throw
   void allocate_nodes(const size_type n, multiallocation_chain &chain)
   {
      size_type i = 0;
      BOOST_TRY{
         this->priv_invariants();
         while(i != n){
            //If there are no free nodes we allocate all needed blocks
            if (m_block_container.empty()){
               this->priv_append_from_new_blocks(n - i, chain, IsAlignOnly());
               BOOST_ASSERT(m_block_container.empty() || (++m_block_container.cbegin() == m_block_container.cend()));
               BOOST_ASSERT(chain.size() == n);
               break;
            }
            free_nodes_t &free_nodes = m_block_container.begin()->free_nodes;
            const size_type free_nodes_count_before = free_nodes.size();
            m_totally_free_blocks -= static_cast<size_type>(free_nodes_count_before == m_real_num_node);
            const size_type num_left = n-i;
            const size_type num_elems = (num_left < free_nodes_count_before) ? num_left : free_nodes_count_before;
            typedef typename free_nodes_t::iterator free_nodes_iterator;

            if(num_left < free_nodes_count_before){
               const free_nodes_iterator it_bbeg(free_nodes.before_begin());
               free_nodes_iterator it_bend(it_bbeg);
               for(size_type j = 0; j != num_elems; ++j){
                  ++it_bend;
               }
               free_nodes_iterator it_end = it_bend; ++it_end;
               free_nodes_iterator it_beg = it_bbeg; ++it_beg;
               free_nodes.erase_after(it_bbeg, it_end, num_elems);
               chain.incorporate_after(chain.last(), &*it_beg, &*it_bend, num_elems);
               //chain.splice_after(chain.last(), free_nodes, it_bbeg, it_bend, num_elems);
               BOOST_ASSERT(!free_nodes.empty());
            }
            else{
               const free_nodes_iterator it_beg(free_nodes.begin()), it_bend(free_nodes.last());
               free_nodes.clear();
               chain.incorporate_after(chain.last(), &*it_beg, &*it_bend, num_elems);
               block_container_traits_t::erase_first(m_block_container);
            }
            i += num_elems;
         }
      }
      BOOST_CATCH(...){
         this->deallocate_nodes(chain);
         BOOST_RETHROW
      }
      BOOST_CATCH_END
      this->priv_invariants();
   }

   //!Deallocates a linked list of nodes. Never throws
   void deallocate_nodes(multiallocation_chain &nodes)
   {
      this->priv_invariants();
      //To take advantage of node locality, wait until two
      //nodes belong to different blocks. Only then reinsert
      //the block of the first node in the block tree.
      //Cache of the previous block
      block_info_t *prev_block_info = 0;

      //If the block was empty before this call, it's not yet
      //inserted in the block tree.
      bool prev_block_was_empty = false;
      typedef typename free_nodes_t::iterator free_nodes_iterator;
      {
         const free_nodes_iterator itbb(nodes.before_begin()), ite(nodes.end());
         free_nodes_iterator itf(nodes.begin()), itbf(itbb);
         size_type splice_node_count = size_type(-1);
         while(itf != ite){
            void *pElem = container_detail::to_raw_pointer(container_detail::iterator_to_raw_pointer(itf));
            block_info_t &block_info = *this->priv_block_from_node(pElem);
            BOOST_ASSERT(block_info.free_nodes.size() < m_real_num_node);
            ++splice_node_count;

            //If block change is detected calculate the cached block position in the tree
            if(&block_info != prev_block_info){
               if(prev_block_info){ //Make sure we skip the initial "dummy" cache
                  free_nodes_iterator it(itbb); ++it;
                  nodes.erase_after(itbb, itf, splice_node_count);
                  prev_block_info->free_nodes.incorporate_after(prev_block_info->free_nodes.last(), &*it, &*itbf, splice_node_count);
                  this->priv_reinsert_block(*prev_block_info, prev_block_was_empty);
                  splice_node_count = 0;
               }
               //Update cache with new data
               prev_block_was_empty = block_info.free_nodes.empty();
               prev_block_info = &block_info;
            }
            itbf = itf;
            ++itf;
         }
      }
      if(prev_block_info){
         //The loop above reinserted all blocks except the last one; reinsert it now
         const free_nodes_iterator itfirst(nodes.begin()), itlast(nodes.last());
         const size_type splice_node_count = nodes.size();
         nodes.clear();
         prev_block_info->free_nodes.incorporate_after(prev_block_info->free_nodes.last(), &*itfirst, &*itlast, splice_node_count);
         this->priv_reinsert_block(*prev_block_info, prev_block_was_empty);
         this->priv_invariants();
         this->priv_deallocate_free_blocks(m_max_free_blocks);
      }
   }

   void deallocate_free_blocks()
   { this->priv_deallocate_free_blocks(0); }

   size_type num_free_nodes()
   {
      typedef typename block_container_t::const_iterator citerator;
      size_type count = 0;
      citerator it (m_block_container.begin()), itend(m_block_container.end());
      for(; it != itend; ++it){
         count += it->free_nodes.size();
      }
      return count;
   }

   void swap(private_adaptive_node_pool_impl &other)
   {
      BOOST_ASSERT(m_max_free_blocks == other.m_max_free_blocks);
      BOOST_ASSERT(m_real_node_size == other.m_real_node_size);
      BOOST_ASSERT(m_real_block_alignment == other.m_real_block_alignment);
      BOOST_ASSERT(m_real_num_node == other.m_real_num_node);
      std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
      std::swap(m_totally_free_blocks, other.m_totally_free_blocks);
      m_block_container.swap(other.m_block_container);
   }

   //Deprecated, use deallocate_free_blocks
   void deallocate_free_chunks()
   { this->priv_deallocate_free_blocks(0); }

   private:

   void priv_deallocate_free_blocks(size_type max_free_blocks)
   { //Trampoline function to ease inlining
      if(m_totally_free_blocks > max_free_blocks){
         this->priv_deallocate_free_blocks_impl(max_free_blocks);
      }
   }

   void priv_deallocate_free_blocks_impl(size_type max_free_blocks)
   {
      this->priv_invariants();
      //Now check if we've reached the free nodes limit
      //and check if we have free blocks. If so, deallocate as much
      //as we can to stay below the limit
      multiallocation_chain chain;
      {
         const const_block_iterator itend = m_block_container.cend();
         const_block_iterator it = itend;
         --it;
         size_type totally_free_blocks = m_totally_free_blocks;

         for( ; totally_free_blocks > max_free_blocks; --totally_free_blocks){
            BOOST_ASSERT(it->free_nodes.size() == m_real_num_node);
            void *addr = priv_first_subblock_from_block(const_cast<block_info_t*>(&*it));
            --it;
            block_container_traits_t::erase_last(m_block_container);
            chain.push_front(void_pointer(addr));
         }
         BOOST_ASSERT((m_totally_free_blocks - max_free_blocks) == chain.size());
         m_totally_free_blocks = max_free_blocks;
      }
      this->mp_segment_mngr_base->deallocate_many(chain);
   }

   void priv_reinsert_block(block_info_t &prev_block_info, const bool prev_block_was_empty)
   {
      //Cache the free nodes from the block
      const size_type this_block_free_nodes = prev_block_info.free_nodes.size();
      const bool is_full = this_block_free_nodes == m_real_num_node;

      //Update free block count
      m_totally_free_blocks += static_cast<size_type>(is_full);
      if(prev_block_was_empty){
         block_container_traits_t::insert_was_empty(m_block_container, prev_block_info, is_full);
      }
      else{
         block_container_traits_t::reinsert_was_used(m_block_container, prev_block_info, is_full);
      }
   }

   class block_destroyer;
   friend class block_destroyer;

   class block_destroyer
   {
      public:
      block_destroyer(const this_type *impl, multiallocation_chain &chain)
         : mp_impl(impl), m_chain(chain)
      {}

      void operator()(typename block_container_t::pointer to_deallocate)
      { return this->do_destroy(to_deallocate, IsAlignOnly()); }

      private:
      void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyTrue)
      {
         BOOST_ASSERT(to_deallocate->free_nodes.size() == mp_impl->m_real_num_node);
         m_chain.push_back(to_deallocate);
      }

      void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyFalse)
      {
         BOOST_ASSERT(to_deallocate->free_nodes.size() == mp_impl->m_real_num_node);
         BOOST_ASSERT(0 == to_deallocate->hdr_offset);
         hdr_offset_holder *hdr_off_holder =
            mp_impl->priv_first_subblock_from_block(container_detail::to_raw_pointer(to_deallocate));
         m_chain.push_back(hdr_off_holder);
      }

      const this_type *mp_impl;
      multiallocation_chain &m_chain;
   };

   //This macro will activate invariant checking. Slow, but helpful for debugging the code.
   //#define BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
   void priv_invariants()
#ifdef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
#undef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
   {
      const const_block_iterator itend(m_block_container.end());

      { //Check that the blocks are correctly ordered in the block container
         const_block_iterator it(m_block_container.begin());

         if(it != itend){
            for(++it; it != itend; ++it){
               const_block_iterator prev(it);
               --prev;
               BOOST_ASSERT(*prev < *it);
               (void)prev; (void)it;
            }
         }
      }
      { //Check that the total free nodes are correct
         const_block_iterator it(m_block_container.cbegin());
         size_type total_free_nodes = 0;
         for(; it != itend; ++it){
            total_free_nodes += it->free_nodes.size();
         }
         BOOST_ASSERT(total_free_nodes >= m_totally_free_blocks*m_real_num_node);
      }
      { //Check that the total totally free blocks are correct
         BOOST_ASSERT(m_block_container.size() >= m_totally_free_blocks);
         const_block_iterator it = m_block_container.cend();
         size_type total_free_blocks = m_totally_free_blocks;
         while(total_free_blocks--){
            BOOST_ASSERT((--it)->free_nodes.size() == m_real_num_node);
         }
      }

      if(!AlignOnly){
         //Check that header offsets are correct
         const_block_iterator it = m_block_container.begin();
         for(; it != itend; ++it){
            hdr_offset_holder *hdr_off_holder = this->priv_first_subblock_from_block(const_cast<block_info_t *>(&*it));
            for(size_type i = 0, max = m_num_subblocks; i < max; ++i){
               const size_type offset = reinterpret_cast<char*>(const_cast<block_info_t *>(&*it)) - reinterpret_cast<char*>(hdr_off_holder);
               BOOST_ASSERT(hdr_off_holder->hdr_offset == offset);
               BOOST_ASSERT(0 == ((size_type)hdr_off_holder & (m_real_block_alignment - 1)));
               BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
               hdr_off_holder = reinterpret_cast<hdr_offset_holder *>(reinterpret_cast<char*>(hdr_off_holder) + m_real_block_alignment);
            }
         }
      }
   }
#else
   {} //empty
#endif

   //!Deallocates all used memory. Never throws
   void priv_clear()
   {
#ifndef NDEBUG
      block_iterator it = m_block_container.begin();
      block_iterator itend = m_block_container.end();
      size_type n_free_nodes = 0;
      for(; it != itend; ++it){
         //Check for memory leak
         BOOST_ASSERT(it->free_nodes.size() == m_real_num_node);
         ++n_free_nodes;
      }
      BOOST_ASSERT(n_free_nodes == m_totally_free_blocks);
#endif
      //Check for memory leaks
      this->priv_invariants();
      multiallocation_chain chain;
      m_block_container.clear_and_dispose(block_destroyer(this, chain));
      this->mp_segment_mngr_base->deallocate_many(chain);
      m_totally_free_blocks = 0;
   }

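   //Recovers the owning block from a node address: mask the address down to the
   //start of its m_real_block_alignment-aligned subblock, then follow hdr_offset
   //to the block_info_t stored in the block's last subblock. For instance, with a
   //1024-byte alignment a node at address 0x7f10 masks down to subblock 0x7c00,
   //whose hdr_offset_holder then points at the block header.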
   block_info_t *priv_block_from_node(void *node, AlignOnlyFalse) const
   {
      hdr_offset_holder *hdr_off_holder =
         reinterpret_cast<hdr_offset_holder*>((std::size_t)node & size_type(~(m_real_block_alignment - 1)));
      BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
      BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
      block_info_t *block = reinterpret_cast<block_info_t *>
         (reinterpret_cast<char*>(hdr_off_holder) + hdr_off_holder->hdr_offset);
      BOOST_ASSERT(block->hdr_offset == 0);
      return block;
   }

   block_info_t *priv_block_from_node(void *node, AlignOnlyTrue) const
   {
      return (block_info_t *)((std::size_t)node & std::size_t(~(m_real_block_alignment - 1)));
   }

   block_info_t *priv_block_from_node(void *node) const
   { return this->priv_block_from_node(node, IsAlignOnly()); }

   hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block) const
   { return this->priv_first_subblock_from_block(block, IsAlignOnly()); }

   hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, AlignOnlyFalse) const
   {
      hdr_offset_holder *const hdr_off_holder = reinterpret_cast<hdr_offset_holder*>
         (reinterpret_cast<char*>(block) - (m_num_subblocks-1)*m_real_block_alignment);
      BOOST_ASSERT(hdr_off_holder->hdr_offset == size_type(reinterpret_cast<char*>(block) - reinterpret_cast<char*>(hdr_off_holder)));
      BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
      BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
      return hdr_off_holder;
   }

   hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, AlignOnlyTrue) const
   {
      return reinterpret_cast<hdr_offset_holder*>(block);
   }

   void priv_dispatch_block_chain_or_free
      ( multiallocation_chain &chain, block_info_t &c_info, size_type num_node
      , char *mem_address, size_type total_elements, bool insert_block_if_free)
   {
      BOOST_ASSERT(chain.size() <= total_elements);
      //First add all possible nodes to the chain
      const size_type left = total_elements - chain.size();
      const size_type max_chain = (num_node < left) ? num_node : left;
      mem_address = static_cast<char *>(container_detail::to_raw_pointer
         (chain.incorporate_after(chain.last(), void_pointer(mem_address), m_real_node_size, max_chain)));
      //Now store remaining nodes in the free list
      if(const size_type max_free = num_node - max_chain){
         free_nodes_t & free_nodes = c_info.free_nodes;
         free_nodes.incorporate_after(free_nodes.last(), void_pointer(mem_address), m_real_node_size, max_free);
         if(insert_block_if_free){
            m_block_container.push_front(c_info);
         }
      }
   }

   //!Allocates several blocks of nodes. Can throw
   void priv_append_from_new_blocks(size_type min_elements, multiallocation_chain &chain, AlignOnlyTrue)
   {
      BOOST_ASSERT(m_block_container.empty());
      BOOST_ASSERT(min_elements > 0);
      const size_type n = (min_elements - 1)/m_real_num_node + 1;
      const size_type real_block_size = m_real_block_alignment - PayloadPerAllocation;
      const size_type total_elements = chain.size() + min_elements;
      for(size_type i = 0; i != n; ++i){
         //We allocate a new NodeBlock and put it as the last
         //element of the tree
         char *mem_address = static_cast<char*>
            (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment));
         if(!mem_address){
            //In case of error, free memory deallocating all nodes (the new ones allocated
            //in this function plus previously stored nodes in chain).
            this->deallocate_nodes(chain);
            throw_bad_alloc();
         }
         block_info_t &c_info = *new(mem_address)block_info_t();
         mem_address += HdrSize;
         if(i != (n-1)){
            chain.incorporate_after(chain.last(), void_pointer(mem_address), m_real_node_size, m_real_num_node);
         }
         else{
            this->priv_dispatch_block_chain_or_free(chain, c_info, m_real_num_node, mem_address, total_elements, true);
         }
      }
   }

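   //Superblock construction for the non align_only case: allocate m_num_subblocks
   //aligned subblocks in one shot, place the block_info_t header in the last
   //subblock, stamp every other subblock with a hdr_offset_holder pointing back to
   //that header, and hand the carved nodes either to the caller's chain or to the
   //block's free node list.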
   void priv_append_from_new_blocks(size_type min_elements, multiallocation_chain &chain, AlignOnlyFalse)
   {
      BOOST_ASSERT(m_block_container.empty());
      BOOST_ASSERT(min_elements > 0);
      const size_type n = (min_elements - 1)/m_real_num_node + 1;
      const size_type real_block_size = m_real_block_alignment*m_num_subblocks - PayloadPerAllocation;
      const size_type elements_per_subblock = (m_real_block_alignment - HdrOffsetSize)/m_real_node_size;
      const size_type hdr_subblock_elements = (m_real_block_alignment - HdrSize - PayloadPerAllocation)/m_real_node_size;
      const size_type total_elements = chain.size() + min_elements;

      for(size_type i = 0; i != n; ++i){
         //We allocate a new NodeBlock and put it as the last
         //element of the tree
         char *mem_address = static_cast<char*>
            (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment));
         if(!mem_address){
            //In case of error, free memory deallocating all nodes (the new ones allocated
            //in this function plus previously stored nodes in chain).
            this->deallocate_nodes(chain);
            throw_bad_alloc();
         }
         //First initialize header information on the last subblock
         char *hdr_addr = mem_address + m_real_block_alignment*(m_num_subblocks-1);
         block_info_t &c_info = *new(hdr_addr)block_info_t();
         //Some structural checks
         BOOST_ASSERT(static_cast<void*>(&static_cast<hdr_offset_holder&>(c_info).hdr_offset) ==
                      static_cast<void*>(&c_info)); (void)c_info;
         if(i != (n-1)){
            for( size_type subblock = 0, maxsubblock = m_num_subblocks - 1
               ; subblock < maxsubblock
               ; ++subblock, mem_address += m_real_block_alignment){
               //Initialize header offset mark
               new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address));
               chain.incorporate_after
                  (chain.last(), void_pointer(mem_address + HdrOffsetSize), m_real_node_size, elements_per_subblock);
            }
            chain.incorporate_after(chain.last(), void_pointer(hdr_addr + HdrSize), m_real_node_size, hdr_subblock_elements);
         }
         else{
            for( size_type subblock = 0, maxsubblock = m_num_subblocks - 1
               ; subblock < maxsubblock
               ; ++subblock, mem_address += m_real_block_alignment){
               //Initialize header offset mark
               new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address));
               this->priv_dispatch_block_chain_or_free
                  (chain, c_info, elements_per_subblock, mem_address + HdrOffsetSize, total_elements, false);
            }
            this->priv_dispatch_block_chain_or_free
               (chain, c_info, hdr_subblock_elements, hdr_addr + HdrSize, total_elements, true);
         }
      }
   }

   private:
   typedef typename boost::intrusive::pointer_traits
      <void_pointer>::template rebind_pointer<segment_manager_base_type>::type segment_mngr_base_ptr_t;
   const size_type m_max_free_blocks;
   const size_type m_real_node_size;
   //Round the size to a power of two value.
   //This is the total memory size (including payload) that we want to
   //allocate from the general-purpose allocator
   const size_type m_real_block_alignment;
   size_type m_num_subblocks;
   //This is the real number of nodes per block
   //const
   size_type m_real_num_node;
   segment_mngr_base_ptr_t mp_segment_mngr_base;   //Segment manager
   block_container_t m_block_container;            //Intrusive block list
   size_type m_totally_free_blocks;                //Free blocks
};
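
//Minimal usage sketch (MySegmentManager and MyNode are hypothetical names used only
//for illustration; any type modelling the SegmentManagerBase interface used above
//should work):
//
//   typedef container_detail::private_adaptive_node_pool_impl
//      <MySegmentManager, adaptive_pool_flag::address_ordered> pool_t;
//   pool_t pool( &segment_manager  //segment manager
//              , sizeof(MyNode)    //node_size
//              , 256               //nodes_per_block
//              , 2                 //max_free_blocks
//              , 5);               //overhead_percent
//   void *node = pool.allocate_node();
//   pool.deallocate_node(node);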

} //namespace container_detail {
} //namespace container {
} //namespace boost {

#include <boost/container/detail/config_end.hpp>

#endif //#ifndef BOOST_CONTAINER_DETAIL_ADAPTIVE_NODE_POOL_IMPL_HPP