1
#include "opal_config.h"
3
/* Enable F_SETSIG and F_SETOWN */
6
#ifdef HAVE_SYS_TYPES_H
12
#include <sys/_time.h>
22
#include <sys/queue.h>
23
#ifndef HAVE_WORKING_RTSIG
30
#define EVLIST_X_NORT 0x1000 /* Skip RT signals (internal) */
33
extern struct event_list eventqueue;
34
extern struct event_list signalqueue;
41
#ifndef HAVE_WORKING_RTSIG
49
/*
 * poll_add -- register an event's fd in the fallback poll() arrays.
 * NOTE(review): non-contiguous fragment; the interleaved bare integers
 * look like original-file line numbers and intervening statements are
 * missing, so only the visible lines are annotated.
 */
poll_add(struct rtsigop *op, struct event *ev)
53
/* No poll array allocated -> nothing to do (rtsig-only mode, presumably). */
if (op->poll == NULL) return 0;
55
/* Arrays full: double capacity of both parallel arrays. */
if (op->cur == op->max) {
58
p = realloc(op->poll, sizeof(*op->poll) * (op->max << 1));
64
p = realloc(op->toev, sizeof(*op->toev) * (op->max << 1));
66
/* NOTE(review): assigning realloc() straight back to op->poll leaks the
 * old buffer if realloc fails -- should go through a temporary. */
op->poll = realloc(op->poll, sizeof(*op->poll) * op->max);
74
/* Fill the next free poll slot for this event's fd. */
pfd = &op->poll[op->cur];
77
/* Translate libevent interest flags into poll(2) event bits. */
if (ev->ev_events & EV_READ) pfd->events |= POLLIN;
78
if (ev->ev_events & EV_WRITE) pfd->events |= POLLOUT;
81
/* Parallel map: toev[i] points back at the event owning poll slot i. */
op->toev[op->cur] = ev;
88
/*
 * poll_free -- drop poll slot n by swapping the last slot into its place
 * (order is not preserved), then shrink the arrays when mostly empty.
 * NOTE(review): non-contiguous fragment; interleaved integers look like
 * original line numbers, intervening statements are missing.
 */
poll_free(struct rtsigop *op, int n)
90
if (op->poll == NULL) return;
94
/* Overwrite slot n with the (former) last slot in both parallel arrays. */
memcpy(&op->poll[n], &op->poll[op->cur], sizeof(*op->poll));
95
op->toev[n] = op->toev[op->cur];
97
/* Shrink once usage falls below half of a larger-than-initial capacity. */
if (op->max > INIT_MAX && op->cur < op->max >> 1) {
99
/* NOTE(review): realloc result assigned straight back -- leaks the old
 * block on failure (less severe here since this is a shrink, but still
 * the classic realloc anti-pattern). */
op->poll = realloc(op->poll, sizeof(*op->poll) * op->max);
100
op->toev = realloc(op->toev, sizeof(*op->toev) * op->max);
105
/*
 * poll_remove -- find the poll slot owned by ev and release it.
 * Linear O(cur) scan of the toev back-pointer map.
 * NOTE(review): fragment; interior lines missing from this view.
 */
poll_remove(struct rtsigop *op, struct event *ev)
109
for (i = 0; i < op->cur; i++) {
110
if (op->toev[i] == ev) {
118
/*
 * activate -- fire an event: non-persistent events are deleted first so
 * they trigger exactly once, then the event is made active with the
 * given result flags.
 */
activate(struct event *ev, int flags)
120
if (!(ev->ev_events & EV_PERSIST)) event_del(ev);
121
event_active(ev, flags, 1);
124
/* Backend entry points, exported to the core via the rtsigops dispatch
 * table below (init / add / del / recalc / dispatch). */
static void *rtsig_init(void);
125
static int rtsig_add(void *, struct event *);
126
static int rtsig_del(void *, struct event *);
127
static int rtsig_recalc(void *, int);
128
static int rtsig_dispatch(void *, struct timeval *);
130
/* NOTE(review): initializer body continues on lines not visible here. */
struct opal_eventop rtsigops = {
144
/*
 * Body fragment of rtsig_init() (the signature line is not visible in
 * this view; interior statements are missing between the interleaved
 * original-line-number integers).
 */
/* Runtime opt-out: EVENT_NORTSIG in the environment disables this backend. */
if (getenv("EVENT_NORTSIG"))
147
op = malloc(sizeof(*op));
148
if (op == NULL) return (NULL);
150
memset(op, 0, sizeof(*op));
153
/* Allocate the parallel poll-fd / event-pointer arrays (op->max slots). */
op->poll = malloc(sizeof(*op->poll) * op->max);
154
if (op->poll == NULL) {
158
op->toev = malloc(sizeof(*op->toev) * op->max);
159
if (op->toev == NULL) {
165
/* Block SIGIO and SIGRTMIN so they can be collected synchronously with
 * sigtimedwait() in the dispatch loop instead of via async handlers. */
sigemptyset(&op->sigs);
166
sigaddset(&op->sigs, SIGIO);
167
sigaddset(&op->sigs, SIGRTMIN);
168
sigprocmask(SIG_BLOCK, &op->sigs, NULL);
174
/*
 * rtsig_add -- register an event with the backend. Signal events are
 * added to the blocked set; fd events get O_ASYNC + F_SETSIG(SIGRTMIN)
 * so readiness arrives as a queued real-time signal, with a poll()
 * fallback for fds that cannot deliver RT signals.
 * NOTE(review): non-contiguous fragment; interior lines missing.
 */
rtsig_add(void *arg, struct event *ev)
176
struct rtsigop *op = (struct rtsigop *) arg;
178
#ifndef HAVE_WORKING_RTSIG
182
/* Pure signal event: just block it so sigtimedwait() can collect it. */
if (ev->ev_events & EV_SIGNAL) {
183
sigaddset(&op->sigs, EVENT_SIGNAL(ev));
184
return sigprocmask(SIG_BLOCK, &op->sigs, NULL);
187
if (!(ev->ev_events & (EV_READ | EV_WRITE))) return 0;
189
#ifndef HAVE_WORKING_RTSIG
190
if (fstat(ev->ev_fd, &st) == -1) return -1;
191
/* FIFOs don't get working RT-signal notification here; mark them to be
 * handled only through the poll() fallback path. */
if (S_ISFIFO(st.st_mode)) {
192
ev->ev_flags |= EVLIST_X_NORT;
197
flags = fcntl(ev->ev_fd, F_GETFL);
201
/* First time arming this fd: route readiness to SIGRTMIN, own the fd,
 * then switch on O_ASYNC signal-driven I/O. */
if (!(flags & O_ASYNC)) {
202
if (fcntl(ev->ev_fd, F_SETSIG, SIGRTMIN) == -1
203
|| fcntl(ev->ev_fd, F_SETOWN, (int) getpid()) == -1)
206
if (fcntl(ev->ev_fd, F_SETFL, flags | O_ASYNC))
211
/* Presumably a Linux-specific "one signal per fd" knob; best-effort
 * (return value deliberately ignored) -- TODO confirm F_SETAUXFL. */
fcntl(ev->ev_fd, F_SETAUXFL, O_ONESIGFD);
215
if (poll_add(op, ev) == -1)
222
/* Error path: restore the original fd flags before failing. */
fcntl(ev->ev_fd, F_SETFL, flags);
228
/*
 * rtsig_del -- unregister an event. Signal events are removed from the
 * backend's blocked set and unblocked again via a local one-signal set;
 * fd events fall through to the poll-array removal.
 * NOTE(review): non-contiguous fragment; interior lines missing.
 */
rtsig_del(void *arg, struct event *ev)
230
struct rtsigop *op = (struct rtsigop *) arg;
232
if (ev->ev_events & EV_SIGNAL) {
235
/* Stop tracking the signal in the backend set... */
sigdelset(&op->sigs, EVENT_SIGNAL(ev));
238
/* ...and unblock just that one signal (local set `sigs`). */
sigaddset(&sigs, EVENT_SIGNAL(ev));
239
return (sigprocmask(SIG_UNBLOCK, &sigs, NULL));
242
if (!(ev->ev_events & (EV_READ | EV_WRITE)))
245
#ifndef HAVE_WORKING_RTSIG
246
/* Poll-only fds (e.g. FIFOs) skip the RT-signal teardown. */
if (ev->ev_flags & EVLIST_X_NORT)
256
/* rtsig_recalc -- backend recalc hook; body not visible in this view. */
rtsig_recalc(void *arg, int max)
262
/*
 * rtsig_dispatch -- one dispatch pass: drain queued RT signals with
 * sigtimedwait(), falling back to poll() for fds that can't use RT
 * signals or when the RT signal queue overflowed (kernel collapses the
 * overflow into a plain SIGIO).
 * NOTE(review): non-contiguous fragment; the interleaved integers look
 * like original-file line numbers and intervening statements are
 * missing, so only the visible lines are annotated.
 */
rtsig_dispatch(void *arg, struct timeval *tv)
264
struct rtsigop *op = (struct rtsigop *) arg;
268
if (op->poll == NULL)
270
#ifndef HAVE_WORKING_RTSIG
276
/* Poll-path pending: don't sleep in sigtimedwait, just drain signals. */
ts.tv_sec = ts.tv_nsec = 0;
278
/* Convert the caller's timeval into a timespec for sigtimedwait(). */
ts.tv_sec = tv->tv_sec;
279
ts.tv_nsec = tv->tv_usec * 1000;
287
signum = sigtimedwait(&op->sigs, &info, &ts);
292
/* EINTR is a benign wakeup; anything else is a real failure. */
return (errno == EINTR ? 0 : -1);
295
/* Got one signal; collect any further pending ones without blocking. */
ts.tv_sec = ts.tv_nsec = 0;
297
/* SIGIO == RT-signal queue overflow: state is lost, rebuild the full
 * poll set from the event queue and fall back to poll() this round. */
if (signum == SIGIO) {
298
#ifndef HAVE_WORKING_RTSIG
306
/* NOTE(review): malloc into op->poll/op->toev here -- presumably the
 * old arrays were freed on a line not visible in this fragment. */
op->poll = malloc(sizeof(*op->poll) * op->total);
307
if (op->poll == NULL)
309
op->toev = malloc(sizeof(*op->toev) * op->total);
310
if (op->toev == NULL) {
316
TAILQ_FOREACH(ev, &eventqueue, ev_next)
317
if (!(ev->ev_flags & EVLIST_X_NORT))
323
/* SIGRTMIN: fd readiness delivered in siginfo (si_fd / si_band). */
if (signum == SIGRTMIN) {
324
int flags, i, sigok = 0;
325
/* si_band <= 0 means SI_SIGIO-style delivery with no band info:
 * assume both directions may be ready. */
if (info.si_band <= 0) { /* SI_SIGIO */
327
flags = EV_READ | EV_WRITE;
330
if (info.si_band & POLLIN) flags |= EV_READ;
331
if (info.si_band & POLLOUT) flags |= EV_WRITE;
332
if (!flags) continue;
335
/* Already covered by a pending poll slot? Strip those directions. */
for (i = 0; flags && i < op->cur; i++) {
338
if (ev->ev_fd == info.si_fd) {
339
flags &= ~ev->ev_events;
344
/* Queue matching events for the remaining directions into the
 * poll set so the readiness is re-checked (signal is edge-ish). */
for (ev = TAILQ_FIRST(&eventqueue);
345
flags && ev != TAILQ_END(&eventqueue);
346
ev = TAILQ_NEXT(ev, ev_next)) {
347
if (ev->ev_fd == info.si_fd) {
348
if (flags & ev->ev_events) {
349
i = poll_add(op, ev);
350
if (i == -1) return -1;
351
flags &= ~ev->ev_events;
358
/* No interested event found: turn async notification off for the fd. */
flags = fcntl(info.si_fd, F_GETFL);
359
if (flags == -1) return -1;
360
fcntl(info.si_fd, F_SETFL, flags & ~O_ASYNC);
363
/* Any other collected signal: activate the matching signal events. */
TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
364
if (EVENT_SIGNAL(ev) == signum)
365
activate(ev, EV_SIGNAL);
373
/* Poll() fallback pass over the accumulated slots.
 * NOTE(review): dereferences tv unconditionally here -- confirm the
 * caller never passes tv == NULL on this path. */
res = poll(op->poll, op->cur, tv->tv_sec * 1000 + tv->tv_usec / 1000);
378
#ifdef HAVE_WORKING_RTSIG
381
while (i < op->cur) {
383
if (op->poll[i].revents) {
385
struct event *ev = op->toev[i];
387
if (op->poll[i].revents & POLLIN)
389
if (op->poll[i].revents & POLLOUT)
392
if (!(ev->ev_events & EV_PERSIST)) {
398
event_active(ev, flags, 1);
400
#ifndef HAVE_WORKING_RTSIG
401
/* Poll-only fds stay in the poll set permanently. */
if (op->toev[i]->ev_flags & EVLIST_X_NORT) {
411
/* Compact: move the last slot down into the hole at i. */
if (op->poll[op->cur].revents) {
412
memcpy(&op->poll[i], &op->poll[op->cur], sizeof(*op->poll));
413
op->toev[i] = op->toev[op->cur];
419
#ifdef HAVE_WORKING_RTSIG
427
/* We just freed it, we shouldn't have a problem getting it back. */
428
op->poll = malloc(sizeof(*op->poll) * op->max);
429
op->toev = malloc(sizeof(*op->toev) * op->max);
431
if (op->poll == NULL || op->toev == NULL)
432
/* NOTE(review): BUG -- "%s" has no matching argument (undefined
 * behavior); should be err(1, "%s: malloc", __func__) or similar. */
err(1, "%s: malloc");