//
// detail/impl/kqueue_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#define BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>

#if defined(BOOST_ASIO_HAS_KQUEUE)

#include <boost/asio/detail/kqueue_reactor.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/error.hpp>

#include <boost/asio/detail/push_options.hpp>

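// On NetBSD the udata field of struct kevent is declared as intptr_t rather
// than void*, so an extra cast is needed when storing a pointer there. This
// macro hides that platform difference from the rest of the file.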
#if defined(__NetBSD__)
# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
    EV_SET(ev, ident, filt, flags, fflags, data, \
      reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
#else
# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
    EV_SET(ev, ident, filt, flags, fflags, data, udata)
#endif

namespace boost {
namespace asio {
namespace detail {

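// Construct the reactor. The kqueue is created via do_kqueue_create(), and
// the interrupter's read descriptor is registered with it so that other
// threads can wake up a thread that is blocked inside kevent().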
kqueue_reactor::kqueue_reactor(boost::asio::io_service& io_service)
  : boost::asio::detail::service_base<kqueue_reactor>(io_service),
    io_service_(use_service<io_service_impl>(io_service)),
    mutex_(),
    kqueue_fd_(do_kqueue_create()),
    interrupter_(),
    shutdown_(false)
{
  struct kevent events[1];
  BOOST_ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
      EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
  if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
  {
    boost::system::error_code error(errno,
        boost::asio::error::get_system_category());
    boost::asio::detail::throw_error(error);
  }
}

kqueue_reactor::~kqueue_reactor()
{
  close(kqueue_fd_);
}

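// Shut the service down: mark the reactor as stopped, strip every pending
// operation from all registered descriptors and from the timer queues, and
// hand those operations to the io_service to be abandoned.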
void kqueue_reactor::shutdown_service()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  lock.unlock();

  op_queue<operation> ops;

  while (descriptor_state* state = registered_descriptors_.first())
  {
    for (int i = 0; i < max_ops; ++i)
      ops.push(state->op_queue_[i]);
    state->shutdown_ = true;
    registered_descriptors_.free(state);
  }

  timer_queues_.get_all_timers(ops);

  io_service_.abandon_operations(ops);
}

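// A kqueue is not inherited by a child created with fork(), so after a fork
// the child must build a fresh kqueue and re-register the interrupter along
// with every descriptor that had an active kevent registration.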
void kqueue_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
{
  if (fork_ev == boost::asio::io_service::fork_child)
  {
    // The kqueue descriptor is automatically closed in the child.
    kqueue_fd_ = -1;
    kqueue_fd_ = do_kqueue_create();

    interrupter_.recreate();

    struct kevent events[2];
    BOOST_ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
        EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
    if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
    {
      boost::system::error_code ec(errno,
          boost::asio::error::get_system_category());
      boost::asio::detail::throw_error(ec, "kqueue interrupter registration");
    }

    // Re-register all descriptors with kqueue.
    mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
    for (descriptor_state* state = registered_descriptors_.first();
        state != 0; state = state->next_)
    {
      if (state->num_kevents_ > 0)
      {
        BOOST_ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_,
            EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
        BOOST_ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_,
            EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
        if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1)
        {
          boost::system::error_code ec(errno,
              boost::asio::error::get_system_category());
          boost::asio::detail::throw_error(ec, "kqueue re-registration");
        }
      }
    }
  }
}

void kqueue_reactor::init_task()
{
  io_service_.init_task();
}

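// Allocate per-descriptor state for a newly opened descriptor. Registration
// with the kqueue itself is deferred until the first operation is started,
// which is why num_kevents_ begins at zero.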
int kqueue_reactor::register_descriptor(socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data)
{
  descriptor_data = allocate_descriptor_state();

  mutex::scoped_lock lock(descriptor_data->mutex_);

  descriptor_data->descriptor_ = descriptor;
  descriptor_data->num_kevents_ = 0;
  descriptor_data->shutdown_ = false;

  return 0;
}

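// Register a descriptor that is used internally by the library. The given
// operation is queued immediately and the descriptor is added to the kqueue
// for read events straight away, rather than lazily.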
int kqueue_reactor::register_internal_descriptor(
    int op_type, socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
  descriptor_data = allocate_descriptor_state();

  mutex::scoped_lock lock(descriptor_data->mutex_);

  descriptor_data->descriptor_ = descriptor;
  descriptor_data->num_kevents_ = 1;
  descriptor_data->shutdown_ = false;
  descriptor_data->op_queue_[op_type].push(op);

  struct kevent events[1];
  BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
      EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
  if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
    return errno;

  return 0;
}

void kqueue_reactor::move_descriptor(socket_type,
    kqueue_reactor::per_descriptor_data& target_descriptor_data,
    kqueue_reactor::per_descriptor_data& source_descriptor_data)
{
  target_descriptor_data = source_descriptor_data;
  source_descriptor_data = 0;
}

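// Start an asynchronous operation on a descriptor. If no operation of the
// same type is already queued, the operation may first be attempted
// speculatively, without waiting for the kqueue to report readiness. The
// descriptor's kevent registration is upgraded on demand: read and except
// operations need only the read filter, write operations need both.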
void kqueue_reactor::start_op(int op_type, socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
    bool is_continuation, bool allow_speculative)
{
  if (!descriptor_data)
  {
    op->ec_ = boost::asio::error::bad_descriptor;
    post_immediate_completion(op, is_continuation);
    return;
  }

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (descriptor_data->shutdown_)
  {
    post_immediate_completion(op, is_continuation);
    return;
  }

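  // Only the first operation of a given type may be run speculatively; later
  // ones are simply queued so that completion order is preserved.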
  if (descriptor_data->op_queue_[op_type].empty())
  {
    static const int num_kevents[max_ops] = { 1, 2, 1 };

    if (allow_speculative
        && (op_type != read_op
          || descriptor_data->op_queue_[except_op].empty()))
    {
      if (op->perform())
      {
        descriptor_lock.unlock();
        io_service_.post_immediate_completion(op, is_continuation);
        return;
      }

      if (descriptor_data->num_kevents_ < num_kevents[op_type])
      {
        struct kevent events[2];
        BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
            EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
        BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
            EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
        if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1)
        {
          descriptor_data->num_kevents_ = num_kevents[op_type];
        }
        else
        {
          op->ec_ = boost::system::error_code(errno,
              boost::asio::error::get_system_category());
          io_service_.post_immediate_completion(op, is_continuation);
          return;
        }
      }
    }
    else
    {
      if (descriptor_data->num_kevents_ < num_kevents[op_type])
        descriptor_data->num_kevents_ = num_kevents[op_type];

      struct kevent events[2];
      BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
          EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
      BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
          EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
      ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
    }
  }

  descriptor_data->op_queue_[op_type].push(op);
  io_service_.work_started();
}

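// Cancel all operations associated with the given descriptor. The handlers
// are posted for deferred invocation with the operation_aborted error.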
void kqueue_reactor::cancel_ops(socket_type,
    kqueue_reactor::per_descriptor_data& descriptor_data)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  op_queue<operation> ops;
  for (int i = 0; i < max_ops; ++i)
  {
    while (reactor_op* op = descriptor_data->op_queue_[i].front())
    {
      op->ec_ = boost::asio::error::operation_aborted;
      descriptor_data->op_queue_[i].pop();
      ops.push(op);
    }
  }

  descriptor_lock.unlock();

  io_service_.post_deferred_completions(ops);
}

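// Deregister a descriptor, aborting any outstanding operations on it. When
// the descriptor is about to be closed the kernel removes it from the kqueue
// automatically; otherwise the event filters are deleted explicitly.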
void kqueue_reactor::deregister_descriptor(socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (!descriptor_data->shutdown_)
  {
    if (closing)
    {
      // The descriptor will be automatically removed from the kqueue when it
      // is closed.
    }
    else
    {
      struct kevent events[2];
      BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
          EVFILT_READ, EV_DELETE, 0, 0, 0);
      BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
          EVFILT_WRITE, EV_DELETE, 0, 0, 0);
      ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
    }

    op_queue<operation> ops;
    for (int i = 0; i < max_ops; ++i)
    {
      while (reactor_op* op = descriptor_data->op_queue_[i].front())
      {
        op->ec_ = boost::asio::error::operation_aborted;
        descriptor_data->op_queue_[i].pop();
        ops.push(op);
      }
    }

    descriptor_data->descriptor_ = -1;
    descriptor_data->shutdown_ = true;

    descriptor_lock.unlock();

    free_descriptor_state(descriptor_data);
    descriptor_data = 0;

    io_service_.post_deferred_completions(ops);
  }
}

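// Deregister an internal descriptor. Unlike deregister_descriptor() above,
// the queued operations are simply discarded rather than posted.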
void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
    kqueue_reactor::per_descriptor_data& descriptor_data)
{
  if (!descriptor_data)
    return;

  mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

  if (!descriptor_data->shutdown_)
  {
    struct kevent events[2];
    BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
        EVFILT_READ, EV_DELETE, 0, 0, 0);
    BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
        EVFILT_WRITE, EV_DELETE, 0, 0, 0);
    ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);

    op_queue<operation> ops;
    for (int i = 0; i < max_ops; ++i)
      ops.push(descriptor_data->op_queue_[i]);

    descriptor_data->descriptor_ = -1;
    descriptor_data->shutdown_ = true;

    descriptor_lock.unlock();

    free_descriptor_state(descriptor_data);
    descriptor_data = 0;
  }
}

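// Run the reactor: wait on the kqueue for events (up to 128 at a time),
// dispatch ready operations into the supplied queue, and collect any timers
// that have expired. The udata field of each event carries either the
// interrupter or a pointer to the relevant descriptor_state.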
void kqueue_reactor::run(bool block, op_queue<operation>& ops)
{
  mutex::scoped_lock lock(mutex_);

  // Determine how long to block while waiting for events.
  timespec timeout_buf = { 0, 0 };
  timespec* timeout = block ? get_timeout(timeout_buf) : &timeout_buf;

  lock.unlock();

  // Block on the kqueue descriptor.
  struct kevent events[128];
  int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);

  // Dispatch the waiting events.
  for (int i = 0; i < num_events; ++i)
  {
    void* ptr = reinterpret_cast<void*>(events[i].udata);
    if (ptr == &interrupter_)
    {
      interrupter_.reset();
    }
    else
    {
      descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
      mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);

      if (events[i].filter == EVFILT_WRITE
          && descriptor_data->num_kevents_ == 2
          && descriptor_data->op_queue_[write_op].empty())
      {
        // Some descriptor types, like serial ports, don't seem to support
        // EV_CLEAR with EVFILT_WRITE. Since we have no pending write
        // operations we'll remove the EVFILT_WRITE registration here so that
        // we don't end up in a tight spin.
        struct kevent delete_events[1];
        BOOST_ASIO_KQUEUE_EV_SET(&delete_events[0],
            descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
        ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0);
        descriptor_data->num_kevents_ = 1;
      }

      // Exception operations must be processed first to ensure that any
      // out-of-band data is read before normal data.
#if defined(__NetBSD__)
      static const unsigned int filter[max_ops] =
#else
      static const int filter[max_ops] =
#endif
        { EVFILT_READ, EVFILT_WRITE, EVFILT_READ };
      for (int j = max_ops - 1; j >= 0; --j)
      {
        if (events[i].filter == filter[j])
        {
          if (j != except_op || events[i].flags & EV_OOBAND)
          {
            while (reactor_op* op = descriptor_data->op_queue_[j].front())
            {
              if (events[i].flags & EV_ERROR)
              {
                op->ec_ = boost::system::error_code(
                    static_cast<int>(events[i].data),
                    boost::asio::error::get_system_category());
                descriptor_data->op_queue_[j].pop();
                ops.push(op);
              }
              if (op->perform())
              {
                descriptor_data->op_queue_[j].pop();
                ops.push(op);
              }
              else
                break;
            }
          }
        }
      }
    }
  }

  lock.lock();
  timer_queues_.get_ready_timers(ops);
}

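// Wake up a thread that is blocked in run(). The interrupter is poked, which
// makes its read descriptor become ready in the kqueue.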
void kqueue_reactor::interrupt()
{
  interrupter_.interrupt();
}

int kqueue_reactor::do_kqueue_create()
{
  int fd = ::kqueue();
  if (fd == -1)
  {
    boost::system::error_code ec(errno,
        boost::asio::error::get_system_category());
    boost::asio::detail::throw_error(ec, "kqueue");
  }
  return fd;
}

kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
{
  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
  return registered_descriptors_.alloc();
}

void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
{
  mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
  registered_descriptors_.free(s);
}

void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(mutex_);
  timer_queues_.insert(&queue);
}

void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
  mutex::scoped_lock lock(mutex_);
  timer_queues_.erase(&queue);
}

timespec* kqueue_reactor::get_timeout(timespec& ts)
{
  // By default we will wait no longer than 5 minutes. This will ensure that
  // any changes to the system clock are detected after no longer than this.
  long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
  ts.tv_sec = usec / 1000000;
  ts.tv_nsec = (usec % 1000000) * 1000;
  return &ts;
}

} // namespace detail
} // namespace asio
} // namespace boost

#undef BOOST_ASIO_KQUEUE_EV_SET

#include <boost/asio/detail/pop_options.hpp>

#endif // defined(BOOST_ASIO_HAS_KQUEUE)

#endif // BOOST_ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP