comparison DEPENDENCIES/generic/include/boost/asio/detail/impl/task_io_service.ipp @ 16:2665513ce2d3

Add boost headers
author Chris Cannam
date Tue, 05 Aug 2014 11:11:38 +0100
parents
children c530137014c0
comparison 15:663ca0da4350 16:2665513ce2d3
//
// detail/impl/task_io_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2013 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
#define BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#if !defined(BOOST_ASIO_HAS_IOCP)

#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/task_io_service.hpp>
#include <boost/asio/detail/task_io_service_thread_info.hpp>

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

struct task_io_service::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      boost::asio::detail::increment(
          task_io_service_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    task_io_service_->task_interrupted_ = true;
    task_io_service_->op_queue_.push(this_thread_->private_op_queue);
    task_io_service_->op_queue_.push(&task_io_service_->task_operation_);
  }

  task_io_service* task_io_service_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

struct task_io_service::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      boost::asio::detail::increment(
          task_io_service_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      task_io_service_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(BOOST_ASIO_HAS_THREADS)
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      task_io_service_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)
  }

  task_io_service* task_io_service_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};

task_io_service::task_io_service(
    boost::asio::io_service& io_service, std::size_t concurrency_hint)
  : boost::asio::detail::service_base<task_io_service>(io_service),
    one_thread_(concurrency_hint == 1),
    mutex_(),
    task_(0),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    first_idle_thread_(0)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;
}

void task_io_service::shutdown_service()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  lock.unlock();

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}

void task_io_service::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = &use_service<reactor>(this->get_io_service());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}

std::size_t task_io_service::run(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  event wakeup_event;
  this_thread.wakeup_event = &wakeup_event;
  this_thread.private_outstanding_work = 0;
  this_thread.next = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t task_io_service::run_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  event wakeup_event;
  this_thread.wakeup_event = &wakeup_event;
  this_thread.private_outstanding_work = 0;
  this_thread.next = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}

std::size_t task_io_service::poll(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.wakeup_event = 0;
  this_thread.private_outstanding_work = 0;
  this_thread.next = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_thread_info = ctx.next_by_key())
      op_queue_.push(outer_thread_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  std::size_t n = 0;
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t task_io_service::poll_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.wakeup_event = 0;
  this_thread.private_outstanding_work = 0;
  this_thread.next = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_thread_info = ctx.next_by_key())
      op_queue_.push(outer_thread_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

void task_io_service::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool task_io_service::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void task_io_service::reset()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

void task_io_service::post_immediate_completion(
    task_io_service::operation* op, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info* this_thread = thread_call_stack::contains(this))
    {
      ++this_thread->private_outstanding_work;
      this_thread->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(BOOST_ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void task_io_service::post_deferred_completion(task_io_service::operation* op)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info* this_thread = thread_call_stack::contains(this))
    {
      this_thread->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(BOOST_ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void task_io_service::post_deferred_completions(
    op_queue<task_io_service::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(BOOST_ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info* this_thread = thread_call_stack::contains(this))
      {
        this_thread->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

void task_io_service::do_dispatch(
    task_io_service::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void task_io_service::abandon_operations(
    op_queue<task_io_service::operation>& ops)
{
  op_queue<task_io_service::operation> ops2;
  ops2.push(ops);
}

std::size_t task_io_service::do_run_one(mutex::scoped_lock& lock,
    task_io_service::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        if (more_handlers && !one_thread_)
        {
          if (!wake_one_idle_thread_and_unlock(lock))
            lock.unlock();
        }
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(!more_handlers, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(*this, ec, task_result);

        return 1;
      }
    }
    else
    {
      // Nothing to run right now, so just wait for work to do.
      this_thread.next = first_idle_thread_;
      first_idle_thread_ = &this_thread;
      this_thread.wakeup_event->clear(lock);
      this_thread.wakeup_event->wait(lock);
    }
  }

  return 0;
}

std::size_t task_io_service::do_poll_one(mutex::scoped_lock& lock,
    task_io_service::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(false, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wake_one_idle_thread_and_unlock(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(*this, ec, task_result);

  return 1;
}

void task_io_service::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;

  while (first_idle_thread_)
  {
    thread_info* idle_thread = first_idle_thread_;
    first_idle_thread_ = idle_thread->next;
    idle_thread->next = 0;
    idle_thread->wakeup_event->signal(lock);
  }

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}

bool task_io_service::wake_one_idle_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (first_idle_thread_)
  {
    thread_info* idle_thread = first_idle_thread_;
    first_idle_thread_ = idle_thread->next;
    idle_thread->next = 0;
    idle_thread->wakeup_event->signal_and_unlock(lock);
    return true;
  }
  return false;
}

void task_io_service::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wake_one_idle_thread_and_unlock(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // !defined(BOOST_ASIO_HAS_IOCP)

#endif // BOOST_ASIO_DETAIL_IMPL_TASK_IO_SERVICE_IPP
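
For context only (not part of the changeset above): a minimal usage sketch showing how the task_io_service scheduler in this file is driven through the public Boost.Asio 1.x io_service interface on platforms where BOOST_ASIO_HAS_IOCP is not defined. The names io, work, and t, the lambdas, and the use of std::thread are illustrative assumptions, not code from this repository.

// Illustrative sketch: io_service::post() reaches post_immediate_completion()
// above, and io_service::run() drives the do_run_one() loop.
#include <boost/asio/io_service.hpp>
#include <iostream>
#include <thread>

int main()
{
  boost::asio::io_service io;              // backed by task_io_service when
                                           // BOOST_ASIO_HAS_IOCP is not defined
  boost::asio::io_service::work work(io);  // keeps outstanding_work_ above zero
                                           // so run() waits instead of returning

  // post() queues the handler on op_queue_ and wakes an idle thread via
  // wake_one_thread_and_unlock().
  io.post([]{ std::cout << "handler ran\n"; });

  std::thread t([&io]{ io.run(); });       // runs the do_run_one() loop

  io.post([&io]{ io.stop(); });            // stop() calls stop_all_threads()
  t.join();                                // run() returns once stopped_ is set
  return 0;
}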