//
// detail/impl/epoll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#define BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#if defined(BOOST_ASIO_HAS_EPOLL)

#include <cstddef>
#include <sys/epoll.h>
#include <boost/asio/detail/epoll_reactor.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/error.hpp>

#if defined(BOOST_ASIO_HAS_TIMERFD)
# include <sys/timerfd.h>
#endif // defined(BOOST_ASIO_HAS_TIMERFD)

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

epoll_reactor::epoll_reactor(boost::asio::io_service& io_service)
  : boost::asio::detail::service_base<epoll_reactor>(io_service),
    io_service_(use_service<io_service_impl>(io_service)),
    mutex_(),
    interrupter_(),
    epoll_fd_(do_epoll_create()),
    timer_fd_(do_timerfd_create()),
    shutdown_(false)
{
  // Add the interrupter's descriptor to epoll.
  epoll_event ev = { 0, { 0 } };
  ev.events = EPOLLIN | EPOLLERR | EPOLLET;
  ev.data.ptr = &interrupter_;
  epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
  interrupter_.interrupt();
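  // The interrupter's descriptor is left in a permanently readable state,
  // and is registered edge-triggered, so the write above yields exactly one
  // wakeup; subsequent wakeups are forced by re-registering the descriptor
  // (see interrupt() below).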

  // Add the timer descriptor to epoll.
  if (timer_fd_ != -1)
  {
    ev.events = EPOLLIN | EPOLLERR;
    ev.data.ptr = &timer_fd_;
    epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
  }
}

epoll_reactor::~epoll_reactor()
{
  if (epoll_fd_ != -1)
    close(epoll_fd_);
  if (timer_fd_ != -1)
    close(timer_fd_);
}

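// Cancels all outstanding work. Each registered descriptor's queued
// operations and all pending timers are handed to the io_service so that
// their handlers can be destroyed without being invoked.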
void epoll_reactor::shutdown_service()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  lock.unlock();

  op_queue<operation> ops;

  while (descriptor_state* state = registered_descriptors_.first())
  {
    for (int i = 0; i < max_ops; ++i)
      ops.push(state->op_queue_[i]);
    state->shutdown_ = true;
    registered_descriptors_.free(state);
  }

  timer_queues_.get_all_timers(ops);

  io_service_.abandon_operations(ops);
}

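// An epoll instance is shared with the parent process after a fork, so in
// the child everything is rebuilt from scratch: new epoll and timerfd
// descriptors, a recreated interrupter, and a fresh registration for every
// descriptor the reactor knows about.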
void epoll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
{
  if (fork_ev == boost::asio::io_service::fork_child)
  {
    if (epoll_fd_ != -1)
      ::close(epoll_fd_);
    epoll_fd_ = -1;
    epoll_fd_ = do_epoll_create();

    if (timer_fd_ != -1)
      ::close(timer_fd_);
    timer_fd_ = -1;
    timer_fd_ = do_timerfd_create();

    interrupter_.recreate();

    // Add the interrupter's descriptor to epoll.
    epoll_event ev = { 0, { 0 } };
    ev.events = EPOLLIN | EPOLLERR | EPOLLET;
    ev.data.ptr = &interrupter_;
    epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
    interrupter_.interrupt();

    // Add the timer descriptor to epoll.
    if (timer_fd_ != -1)
    {
      ev.events = EPOLLIN | EPOLLERR;
      ev.data.ptr = &timer_fd_;
      epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
    }

    update_timeout();

    // Re-register all descriptors with epoll.
    mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
    for (descriptor_state* state = registered_descriptors_.first();
        state != 0; state = state->next_)
    {
      ev.events = state->registered_events_;
      ev.data.ptr = state;
      int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev);
      if (result != 0)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        boost::asio::detail::throw_error(ec, "epoll re-registration");
      }
    }
  }
}

void epoll_reactor::init_task()
{
  io_service_.init_task();
}

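// Descriptors are added to epoll once, edge-triggered, with the full set of
// read/error/hangup/priority events. Individual operations are then queued
// on the descriptor_state rather than toggled via epoll_ctl each time.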
int epoll_reactor::register_descriptor(socket_type descriptor,
    epoll_reactor::per_descriptor_data& descriptor_data)
{
  descriptor_data = allocate_descriptor_state();

  {
    mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

    descriptor_data->reactor_ = this;
    descriptor_data->descriptor_ = descriptor;
    descriptor_data->shutdown_ = false;
  }

  epoll_event ev = { 0, { 0 } };
  ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
  descriptor_data->registered_events_ = ev.events;
  ev.data.ptr = descriptor_data;
  int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
  if (result != 0)
    return errno;

  return 0;
}

int epoll_reactor::register_internal_descriptor(
    int op_type, socket_type descriptor,
    epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
  descriptor_data = allocate_descriptor_state();

  {
    mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

    descriptor_data->reactor_ = this;
    descriptor_data->descriptor_ = descriptor;
    descriptor_data->shutdown_ = false;
    descriptor_data->op_queue_[op_type].push(op);
  }

  epoll_event ev = { 0, { 0 } };
  ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
  descriptor_data->registered_events_ = ev.events;
  ev.data.ptr = descriptor_data;
  int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
  if (result != 0)
    return errno;

  return 0;
}

void epoll_reactor::move_descriptor(socket_type,
    epoll_reactor::per_descriptor_data& target_descriptor_data,
    epoll_reactor::per_descriptor_data& source_descriptor_data)
{
  target_descriptor_data = source_descriptor_data;
  source_descriptor_data = 0;
}

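// Starts a new operation on a descriptor. When the relevant queue is empty
// and allow_speculative is set, the operation is attempted immediately, and
// queued to wait for readiness only if it would block. EPOLLOUT is added to
// the epoll registration lazily, the first time a write operation has to
// wait.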
void epoll_reactor::start_op(int op_type, socket_type descriptor,
    epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
    bool is_continuation, bool allow_speculative)
{
  if (!descriptor_data)
  {
    op->ec_ = boost::asio::error::bad_descriptor;
    post_immediate_completion(op, is_continuation);
    return;
  }

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (descriptor_data->shutdown_)
  {
    post_immediate_completion(op, is_continuation);
    return;
  }

  if (descriptor_data->op_queue_[op_type].empty())
  {
    if (allow_speculative
        && (op_type != read_op
          || descriptor_data->op_queue_[except_op].empty()))
    {
      if (op->perform())
      {
        descriptor_lock.unlock();
        io_service_.post_immediate_completion(op, is_continuation);
        return;
      }

      if (op_type == write_op)
      {
        if ((descriptor_data->registered_events_ & EPOLLOUT) == 0)
        {
          epoll_event ev = { 0, { 0 } };
          ev.events = descriptor_data->registered_events_ | EPOLLOUT;
          ev.data.ptr = descriptor_data;
          if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0)
          {
            descriptor_data->registered_events_ |= ev.events;
          }
          else
          {
            op->ec_ = boost::system::error_code(errno,
                boost::asio::error::get_system_category());
            io_service_.post_immediate_completion(op, is_continuation);
            return;
          }
        }
      }
    }
    else
    {
      if (op_type == write_op)
      {
        descriptor_data->registered_events_ |= EPOLLOUT;
      }

      epoll_event ev = { 0, { 0 } };
      ev.events = descriptor_data->registered_events_;
      ev.data.ptr = descriptor_data;
      epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
    }
  }

  descriptor_data->op_queue_[op_type].push(op);
  io_service_.work_started();
}

void epoll_reactor::cancel_ops(socket_type,
    epoll_reactor::per_descriptor_data& descriptor_data)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  op_queue<operation> ops;
  for (int i = 0; i < max_ops; ++i)
  {
    while (reactor_op* op = descriptor_data->op_queue_[i].front())
    {
      op->ec_ = boost::asio::error::operation_aborted;
      descriptor_data->op_queue_[i].pop();
      ops.push(op);
    }
  }

  descriptor_lock.unlock();

  io_service_.post_deferred_completions(ops);
}

void epoll_reactor::deregister_descriptor(socket_type descriptor,
    epoll_reactor::per_descriptor_data& descriptor_data, bool closing)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (!descriptor_data->shutdown_)
  {
    if (closing)
    {
      // The descriptor will be automatically removed from the epoll set when
      // it is closed.
    }
    else
    {
      epoll_event ev = { 0, { 0 } };
      epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
    }

    op_queue<operation> ops;
    for (int i = 0; i < max_ops; ++i)
    {
      while (reactor_op* op = descriptor_data->op_queue_[i].front())
      {
        op->ec_ = boost::asio::error::operation_aborted;
        descriptor_data->op_queue_[i].pop();
        ops.push(op);
      }
    }

    descriptor_data->descriptor_ = -1;
    descriptor_data->shutdown_ = true;

    descriptor_lock.unlock();

    free_descriptor_state(descriptor_data);
    descriptor_data = 0;

    io_service_.post_deferred_completions(ops);
  }
}

void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
    epoll_reactor::per_descriptor_data& descriptor_data)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (!descriptor_data->shutdown_)
  {
    epoll_event ev = { 0, { 0 } };
    epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);

    op_queue<operation> ops;
    for (int i = 0; i < max_ops; ++i)
      ops.push(descriptor_data->op_queue_[i]);

    descriptor_data->descriptor_ = -1;
    descriptor_data->shutdown_ = true;

    descriptor_lock.unlock();

    free_descriptor_state(descriptor_data);
    descriptor_data = 0;
  }
}

void epoll_reactor::run(bool block, op_queue<operation>& ops)
{
  // This code relies on the fact that the task_io_service queues the reactor
  // task behind all descriptor operations generated by this function. This
  // means that, by the time we reach this point, any previously returned
  // descriptor operations have already been dequeued. Therefore it is now safe
  // for us to reuse and return them for the task_io_service to queue again.

  // Calculate a timeout only if timerfd is not used.
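  // (When timerfd is in use, timer expiries are delivered through timer_fd_
  // itself, so the epoll_wait below may block without a deadline of its own.)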
  int timeout;
  if (timer_fd_ != -1)
    timeout = block ? -1 : 0;
  else
  {
    mutex::scoped_lock lock(mutex_);
    timeout = block ? get_timeout() : 0;
  }

  // Block on the epoll descriptor.
  epoll_event events[128];
  int num_events = epoll_wait(epoll_fd_, events, 128, timeout);

#if defined(BOOST_ASIO_HAS_TIMERFD)
  bool check_timers = (timer_fd_ == -1);
#else // defined(BOOST_ASIO_HAS_TIMERFD)
  bool check_timers = true;
#endif // defined(BOOST_ASIO_HAS_TIMERFD)

  // Dispatch the waiting events.
  for (int i = 0; i < num_events; ++i)
  {
    void* ptr = events[i].data.ptr;
    if (ptr == &interrupter_)
    {
      // No need to reset the interrupter since we're leaving the descriptor
      // in a ready-to-read state and relying on edge-triggered notifications
      // to make it so that we only get woken up when the descriptor's epoll
      // registration is updated.

#if defined(BOOST_ASIO_HAS_TIMERFD)
      if (timer_fd_ == -1)
        check_timers = true;
#else // defined(BOOST_ASIO_HAS_TIMERFD)
      check_timers = true;
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
    }
#if defined(BOOST_ASIO_HAS_TIMERFD)
    else if (ptr == &timer_fd_)
    {
      check_timers = true;
    }
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
    else
    {
      // The descriptor operation doesn't count as work in and of itself, so we
      // don't call work_started() here. This still allows the io_service to
      // stop if the only remaining operations are descriptor operations.
      descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
      descriptor_data->set_ready_events(events[i].events);
      ops.push(descriptor_data);
    }
  }

  if (check_timers)
  {
    mutex::scoped_lock common_lock(mutex_);
    timer_queues_.get_ready_timers(ops);

#if defined(BOOST_ASIO_HAS_TIMERFD)
    if (timer_fd_ != -1)
    {
      itimerspec new_timeout;
      itimerspec old_timeout;
      int flags = get_timeout(new_timeout);
      timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
    }
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
  }
}

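// Wakes up a thread that is blocked in epoll_wait(). The interrupter's
// descriptor is already readable, so re-registering it with EPOLL_CTL_MOD
// re-arms the edge-triggered notification and forces a new EPOLLIN event.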
void epoll_reactor::interrupt()
{
  epoll_event ev = { 0, { 0 } };
  ev.events = EPOLLIN | EPOLLERR | EPOLLET;
  ev.data.ptr = &interrupter_;
  epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev);
}

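// Prefer epoll_create1() so that the descriptor is atomically marked
// close-on-exec. If that call is unavailable (older kernel or libc), fall
// back to epoll_create() and set FD_CLOEXEC separately.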
int epoll_reactor::do_epoll_create()
{
#if defined(EPOLL_CLOEXEC)
  int fd = epoll_create1(EPOLL_CLOEXEC);
#else // defined(EPOLL_CLOEXEC)
  int fd = -1;
  errno = EINVAL;
#endif // defined(EPOLL_CLOEXEC)

  if (fd == -1 && (errno == EINVAL || errno == ENOSYS))
  {
    fd = epoll_create(epoll_size);
    if (fd != -1)
      ::fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  if (fd == -1)
  {
    boost::system::error_code ec(errno,
        boost::asio::error::get_system_category());
    boost::asio::detail::throw_error(ec, "epoll");
  }

  return fd;
}

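// Creates the timer descriptor, applying the same close-on-exec-then-fallback
// approach as do_epoll_create(). Unlike epoll creation, failure here is not
// fatal: returning -1 simply makes the reactor fall back to epoll_wait
// timeouts for timer handling.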
int epoll_reactor::do_timerfd_create()
{
#if defined(BOOST_ASIO_HAS_TIMERFD)
# if defined(TFD_CLOEXEC)
  int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
# else // defined(TFD_CLOEXEC)
  int fd = -1;
  errno = EINVAL;
# endif // defined(TFD_CLOEXEC)

  if (fd == -1 && errno == EINVAL)
  {
    fd = timerfd_create(CLOCK_MONOTONIC, 0);
    if (fd != -1)
      ::fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  return fd;
#else // defined(BOOST_ASIO_HAS_TIMERFD)
  return -1;
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
}

epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
{
  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
  return registered_descriptors_.alloc();
}

void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
{
  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
  registered_descriptors_.free(s);
}

void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(mutex_);
  timer_queues_.insert(&queue);
}

void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(mutex_);
  timer_queues_.erase(&queue);
}

void epoll_reactor::update_timeout()
{
#if defined(BOOST_ASIO_HAS_TIMERFD)
  if (timer_fd_ != -1)
  {
    itimerspec new_timeout;
    itimerspec old_timeout;
    int flags = get_timeout(new_timeout);
    timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
    return;
  }
#endif // defined(BOOST_ASIO_HAS_TIMERFD)
  interrupt();
}

int epoll_reactor::get_timeout()
{
  // By default we will wait no longer than 5 minutes. This will ensure that
  // any changes to the system clock are detected after no longer than this.
  return timer_queues_.wait_duration_msec(5 * 60 * 1000);
}

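// Computes the timerfd setting for the next timer expiry, again capped at
// five minutes. A wait duration of zero cannot be passed through directly,
// because an it_value of all zeroes would disarm the timerfd; instead the
// timer is set to the absolute time of 1ns past the clock's epoch, which has
// already passed and therefore fires immediately.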
#if defined(BOOST_ASIO_HAS_TIMERFD)
int epoll_reactor::get_timeout(itimerspec& ts)
{
  ts.it_interval.tv_sec = 0;
  ts.it_interval.tv_nsec = 0;

  long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
  ts.it_value.tv_sec = usec / 1000000;
  ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;

  return usec ? 0 : TFD_TIMER_ABSTIME;
}
#endif // defined(BOOST_ASIO_HAS_TIMERFD)

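// Helper that defers completion bookkeeping until perform_io() returns. Its
// destructor posts any completed operations beyond the first, and balances
// the task_io_service's work count when nothing completed at all.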
struct epoll_reactor::perform_io_cleanup_on_block_exit
{
  explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
    : reactor_(r), first_op_(0)
  {
  }

  ~perform_io_cleanup_on_block_exit()
  {
    if (first_op_)
    {
      // Post the remaining completed operations for invocation.
      if (!ops_.empty())
        reactor_->io_service_.post_deferred_completions(ops_);

      // A user-initiated operation has completed, but there's no need to
      // explicitly call work_finished() here. Instead, we'll take advantage of
      // the fact that the task_io_service will call work_finished() once we
      // return.
    }
    else
    {
      // No user-initiated operations have completed, so we need to compensate
      // for the work_finished() call that the task_io_service will make once
      // this operation returns.
      reactor_->io_service_.work_started();
    }
  }

  epoll_reactor* reactor_;
  op_queue<operation> ops_;
  operation* first_op_;
};

epoll_reactor::descriptor_state::descriptor_state()
  : operation(&epoll_reactor::descriptor_state::do_complete)
{
}

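// Runs the queued operations that the reported epoll events have made ready.
// The first completed operation is returned so the calling thread can invoke
// it directly; the rest are posted by perform_io_cleanup_on_block_exit. The
// cleanup object is declared before the scoped_lock (which adopts the lock
// acquired above) so that it is destroyed, and posts, only after the mutex
// has been released.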
operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
{
  mutex_.lock();
  perform_io_cleanup_on_block_exit io_cleanup(reactor_);
  mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock);

  // Exception operations must be processed first to ensure that any
  // out-of-band data is read before normal data.
  static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
  for (int j = max_ops - 1; j >= 0; --j)
  {
    if (events & (flag[j] | EPOLLERR | EPOLLHUP))
    {
      while (reactor_op* op = op_queue_[j].front())
      {
        if (op->perform())
        {
          op_queue_[j].pop();
          io_cleanup.ops_.push(op);
        }
        else
          break;
      }
    }
  }

  // The first operation will be returned for completion now. The others will
  // be posted for later by the io_cleanup object's destructor.
  io_cleanup.first_op_ = io_cleanup.ops_.front();
  io_cleanup.ops_.pop();
  return io_cleanup.first_op_;
}

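// Completion handler adaptor for descriptor_state. When run() queues a
// descriptor_state it records the ready event mask via set_ready_events();
// the task_io_service passes that value back here through the
// bytes_transferred argument, where it is unpacked and acted upon.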
void epoll_reactor::descriptor_state::do_complete(
    io_service_impl* owner, operation* base,
    const boost::system::error_code& ec, std::size_t bytes_transferred)
{
  if (owner)
  {
    descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);
    uint32_t events = static_cast<uint32_t>(bytes_transferred);
    if (operation* op = descriptor_data->perform_io(events))
    {
      op->complete(*owner, ec, 0);
    }
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // defined(BOOST_ASIO_HAS_EPOLL)

#endif // BOOST_ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP