1
// libTorrent - BitTorrent library
2
// Copyright (C) 2005-2007, Jari Sundell
4
// This program is free software; you can redistribute it and/or modify
5
// it under the terms of the GNU General Public License as published by
6
// the Free Software Foundation; either version 2 of the License, or
7
// (at your option) any later version.
9
// This program is distributed in the hope that it will be useful,
10
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
// GNU General Public License for more details.
14
// You should have received a copy of the GNU General Public License
15
// along with this program; if not, write to the Free Software
16
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
// In addition, as a special exception, the copyright holders give
19
// permission to link the code of portions of this program with the
20
// OpenSSL library under certain conditions as described in each
21
// individual source file, and distribute linked combinations
24
// You must obey the GNU General Public License in all respects for
25
// all of the code used other than OpenSSL. If you modify file(s)
26
// with this exception, you may extend this exception to your version
27
// of the file(s), but you are not obligated to do so. If you do not
28
// wish to do so, delete this exception statement from your version.
29
// If you delete this exception statement from all source files in the
30
// program, then also delete it here.
32
// Contact: Jari Sundell <jaris@ifi.uio.no>
35
// 3185 Skoppum, NORWAY
43
#include <cassert>
#include <cstring>
#include <algorithm>

#include <unistd.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/select.h>

#include <rak/error_number.h>
#include <rak/functional.h>
#include <torrent/exceptions.h>
#include <torrent/event.h>

#include "poll_kqueue.h"
#include "thread_base.h"
65
PollKQueue::event_mask(Event* e) {
66
assert(e->file_descriptor() != -1);
68
Table::value_type entry = m_table[e->file_descriptor()];
69
return entry.second != e ? 0 : entry.first;
73
PollKQueue::set_event_mask(Event* e, uint32_t m) {
74
assert(e->file_descriptor() != -1);
76
m_table[e->file_descriptor()] = Table::value_type(m, e);
80
// Pushes the queued filter changes to the kernel and collects any
// already-pending events, without blocking (zero timeout).
void
PollKQueue::flush_events() {
  timespec timeout = { 0, 0 };

  int nfds = kevent(m_fd, m_changes, m_changedEvents, m_events + m_waitingEvents, m_maxEvents - m_waitingEvents, &timeout);

  if (nfds == -1)
    throw internal_error("PollKQueue::flush_events() error: " + std::string(rak::error_number::current().c_str()));

  // The change list has been consumed by the kernel.
  m_changedEvents = 0;
  m_waitingEvents += nfds;
}
// Queues a filter change (op = EV_ADD/EV_DELETE, mask = EVFILT_*)
// for 'event'. Changes are batched in m_changes and submitted to the
// kernel lazily by poll()/flush_events().
void
PollKQueue::modify(Event* event, unsigned short op, short mask) {
  // Flush the changed filters to the kernel if the buffer is full.
  if (m_changedEvents == m_maxEvents) {
    if (kevent(m_fd, m_changes, m_changedEvents, NULL, 0, NULL) == -1)
      throw internal_error("PollKQueue::modify() error: " + std::string(rak::error_number::current().c_str()));

    m_changedEvents = 0;
  }

  struct kevent* itr = m_changes + (m_changedEvents++);

  assert(event == m_table[event->file_descriptor()].second);

  // Store the Event in udata: close()/closed() purge pending changes
  // by matching udata against the event, so passing NULL here would
  // leave stale changes for closed descriptors in the queue.
  EV_SET(itr, event->file_descriptor(), mask, op, 0, 0, event);
}
// Factory: creates a kqueue-backed poller, or returns NULL when the
// kqueue facility is unavailable so the caller can fall back to
// another poll implementation.
PollKQueue*
PollKQueue::create(int maxOpenSockets) {
  int fd = kqueue();

  if (fd == -1)
    return NULL;

  return new PollKQueue(fd, 1024, maxOpenSockets);
}
// Received events (m_events) and pending filter changes (m_changes)
// are kept in separate fixed-size buffers; m_table maps fd -> (mask,
// Event*) and is sized by the fd limit.
PollKQueue::PollKQueue(int fd, int maxEvents, int maxOpenSockets) :
  m_fd(fd),
  m_maxEvents(maxEvents),
  m_waitingEvents(0),
  m_changedEvents(0) {

#if KQUEUE_SOCKET_ONLY
  // NOTE(review): assumes m_stdinEvent is only declared for
  // socket-only kqueue builds — confirm against poll_kqueue.h.
  m_stdinEvent = NULL;
#endif

  m_events = new struct kevent[m_maxEvents];
  m_changes = new struct kevent[maxOpenSockets];

  m_table.resize(maxOpenSockets);
}
// Releases the event/change buffers allocated in the constructor and
// closes the kqueue descriptor.
PollKQueue::~PollKQueue() {
  m_table.clear();

  delete [] m_events;
  delete [] m_changes;

  ::close(m_fd);
}
// Waits up to 'msec' milliseconds for events, submitting any queued
// filter changes in the same kevent() call. Returns the number of
// new events, or -1 on error.
int
PollKQueue::poll(int msec) {
#if KQUEUE_SOCKET_ONLY
  if (m_stdinEvent != NULL) {
    // Flush all changes to the kqueue poll before we start select
    // polling, so that they get included.
    if (m_changedEvents != 0)
      flush_events();

    if (poll_select(msec) == -1)
      return -1;

    // The timeout was already handled in select().
    msec = 0;
  }
#endif

  timespec timeout = { msec / 1000, (msec % 1000) * 1000000 };

  int nfds = kevent(m_fd, m_changes, m_changedEvents, m_events + m_waitingEvents, m_maxEvents - m_waitingEvents, &timeout);

  // Clear the changed events even on fail as we might have received a
  // signal or similar, and the changed events have already been
  // consumed by the kernel.
  //
  // There's a chance a bad changed event could make kevent return -1,
  // but it won't as long as there is room enough in m_events.
  m_changedEvents = 0;

  if (nfds == -1)
    return -1;

  m_waitingEvents += nfds;
  return nfds;
}
#if KQUEUE_SOCKET_ONLY
// Socket-only kqueues cannot watch stdin, so stdin (fd 0) and the
// kqueue descriptor itself are polled with select(); a synthetic
// kevent entry is appended for stdin so perform() dispatches it like
// any kernel-reported event. Returns select()'s result.
int
PollKQueue::poll_select(int msec) {
  // No room left in the event buffer; don't bother polling.
  if (m_waitingEvents >= m_maxEvents)
    return 0;

  timeval selectTimeout = { msec / 1000, (msec % 1000) * 1000 };

  // If m_fd isn't the first FD opened by the client and has
  // a low number, using ::poll() here would perform better.

  // This kinda assumes fd_set's internal type is int. Zeroing
  // m_fd + 1 bytes covers at least the m_fd + 1 fd bits we use.
  int readBuffer[m_fd + 1];
  fd_set* readSet = (fd_set*)&readBuffer;

  std::memset(readBuffer, 0, m_fd + 1);
  FD_SET(0, readSet);
  FD_SET(m_fd, readSet);

  int nfds = select(m_fd + 1, readSet, NULL, NULL, &selectTimeout);

  if (nfds == -1)
    return -1;

  if (FD_ISSET(0, readSet)) {
    m_events[m_waitingEvents].ident = 0;
    m_events[m_waitingEvents].filter = EVFILT_READ;
    m_events[m_waitingEvents].flags = 0;
    m_waitingEvents++;
  }

  return nfds;
}
#endif
// Dispatches every buffered event to its Event handler, then resets
// the buffer.
void
PollKQueue::perform() {
  for (struct kevent *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr) {
    // Skip entries whose fd falls outside the descriptor table.
    // (kevent ident is unsigned, so only the upper bound can fail;
    // the original 'itr->ident < 0' check was always false.)
    if (itr->ident >= m_table.size())
      continue;

    if ((flags() & flag_waive_global_lock) && ThreadBase::global_queue_size() != 0)
      ThreadBase::waive_global_lock();

    Table::iterator evItr = m_table.begin() + itr->ident;

    if ((itr->flags & EV_ERROR) && evItr->second != NULL) {
      if (evItr->first & flag_error)
        evItr->second->event_error();
      continue;
    }

    // Also check current mask, as an earlier handler in this loop may
    // have removed interest in this fd.
    if (itr->filter == EVFILT_READ && evItr->second != NULL && evItr->first & flag_read)
      evItr->second->event_read();

    if (itr->filter == EVFILT_WRITE && evItr->second != NULL && evItr->first & flag_write)
      evItr->second->event_write();
  }

  m_waitingEvents = 0;
}
PollKQueue::open_max() const {
241
return m_table.size();
245
PollKQueue::open(Event* event) {
246
if (event_mask(event) != 0)
247
throw internal_error("PollKQueue::open(...) called but the file descriptor is active");
251
// Deregisters 'event' before its fd is closed. All interest flags
// must already have been removed.
void
PollKQueue::close(Event* event) {
#if KQUEUE_SOCKET_ONLY
  if (event->file_descriptor() == 0) {
    m_stdinEvent = NULL;
    return;
  }
#endif

  if (event_mask(event) != 0)
    throw internal_error("PollKQueue::close(...) called but the file descriptor is active");

  m_table[event->file_descriptor()] = Table::value_type();

  // Shouldn't be needed anymore.
  for (struct kevent *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr)
    if (itr->udata == event)
      itr->udata = NULL;

  // Drop any still-pending filter changes that reference this event.
  m_changedEvents = std::remove_if(m_changes, m_changes + m_changedEvents, rak::equal(event, rak::mem_ref(&kevent::udata))) - m_changes;
}
// Called after 'event's fd was closed behind the poller's back.
void
PollKQueue::closed(Event* event) {
#if KQUEUE_SOCKET_ONLY
  if (event->file_descriptor() == 0) {
    m_stdinEvent = NULL;
    return;
  }
#endif

  // Kernel removes closed FDs automatically, so just clear the mask
  // and remove it from pending calls. Don't touch if the FD was
  // re-used before we received the close notification.
  if (m_table[event->file_descriptor()].second == event)
    m_table[event->file_descriptor()] = Table::value_type();

  for (struct kevent *itr = m_events, *last = m_events + m_waitingEvents; itr != last; ++itr) {
    if (itr->udata == event)
      itr->udata = NULL;
  }

  m_changedEvents = std::remove_if(m_changes, m_changes + m_changedEvents, rak::equal(event, rak::mem_ref(&kevent::udata))) - m_changes;
}
// Use custom defines for EPOLL* to make the below code compile with
302
PollKQueue::in_read(Event* event) {
303
return event_mask(event) & flag_read;
307
PollKQueue::in_write(Event* event) {
308
return event_mask(event) & flag_write;
312
PollKQueue::in_error(Event* event) {
313
return event_mask(event) & flag_error;
317
// Adds read interest for 'event'; no-op if already registered.
void
PollKQueue::insert_read(Event* event) {
  if (event_mask(event) & flag_read)
    return;

  set_event_mask(event, event_mask(event) | flag_read);

#if KQUEUE_SOCKET_ONLY
  // Stdin can't be watched by a socket-only kqueue; poll() covers it
  // with select() via m_stdinEvent instead.
  if (event->file_descriptor() == 0) {
    m_stdinEvent = event;
    return;
  }
#endif

  modify(event, EV_ADD, EVFILT_READ);
}
// Adds write interest for 'event'; no-op if already registered.
void
PollKQueue::insert_write(Event* event) {
  if (event_mask(event) & flag_write)
    return;

  set_event_mask(event, event_mask(event) | flag_write);
  modify(event, EV_ADD, EVFILT_WRITE);
}
PollKQueue::insert_error(Event* event) {
347
// Removes read interest for 'event'; no-op if not registered.
void
PollKQueue::remove_read(Event* event) {
  if (!(event_mask(event) & flag_read))
    return;

  set_event_mask(event, event_mask(event) & ~flag_read);

#if KQUEUE_SOCKET_ONLY
  // Stdin was never registered with the kernel queue; just detach it.
  if (event->file_descriptor() == 0) {
    m_stdinEvent = NULL;
    return;
  }
#endif

  modify(event, EV_DELETE, EVFILT_READ);
}
// Removes write interest for 'event'; no-op if not registered.
void
PollKQueue::remove_write(Event* event) {
  if (!(event_mask(event) & flag_write))
    return;

  set_event_mask(event, event_mask(event) & ~flag_write);
  modify(event, EV_DELETE, EVFILT_WRITE);
}
PollKQueue::remove_error(Event* event) {
379
PollKQueue::create(__UNUSED int maxOpenSockets) {
383
PollKQueue::~PollKQueue() {
387
PollKQueue::poll(__UNUSED int msec) {
388
throw internal_error("An PollKQueue function was called, but it is disabled.");
392
PollKQueue::perform() {
393
throw internal_error("An PollKQueue function was called, but it is disabled.");
397
PollKQueue::open_max() const {
398
throw internal_error("An PollKQueue function was called, but it is disabled.");
402
PollKQueue::open(__UNUSED torrent::Event* event) {
406
PollKQueue::close(__UNUSED torrent::Event* event) {
410
PollKQueue::closed(__UNUSED torrent::Event* event) {
414
PollKQueue::in_read(__UNUSED torrent::Event* event) {
415
throw internal_error("An PollKQueue function was called, but it is disabled.");
419
PollKQueue::in_write(__UNUSED torrent::Event* event) {
420
throw internal_error("An PollKQueue function was called, but it is disabled.");
424
PollKQueue::in_error(__UNUSED torrent::Event* event) {
425
throw internal_error("An PollKQueue function was called, but it is disabled.");
429
PollKQueue::insert_read(__UNUSED torrent::Event* event) {
433
PollKQueue::insert_write(__UNUSED torrent::Event* event) {
437
PollKQueue::insert_error(__UNUSED torrent::Event* event) {
441
PollKQueue::remove_read(__UNUSED torrent::Event* event) {
445
PollKQueue::remove_write(__UNUSED torrent::Event* event) {
449
PollKQueue::remove_error(__UNUSED torrent::Event* event) {
452
PollKQueue::PollKQueue(__UNUSED int fd, __UNUSED int maxEvents, __UNUSED int maxOpenSockets) {
453
throw internal_error("An PollKQueue function was called, but it is disabled.");