/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
29
#include <sys/sysctl.h>
30
#include <sys/types.h>
31
#include <sys/event.h>
37
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
40
/* Create the kqueue descriptor that backs this event loop's poller.
 * NOTE(review): fragmentary chunk -- the interleaved bare numbers are
 * residue of the original line numbering, and the error-return path and
 * closing brace were dropped by the extraction.  Consult upstream libuv
 * for the full body. */
int uv__kqueue_init(uv_loop_t* loop) {
41
loop->backend_fd = kqueue();
43
/* kqueue() returns -1 on failure; the dropped line presumably reported
 * the error to the caller -- TODO confirm against upstream. */
if (loop->backend_fd == -1)
46
/* Mark the backend fd close-on-exec so child processes don't inherit it. */
uv__cloexec(loop->backend_fd, 1);
52
/* Run one iteration of the kqueue-backed poll phase for this loop:
 * submit pending watcher changes, wait up to `timeout` ms (-1 = forever,
 * 0 = non-blocking), then dispatch ready events to watcher callbacks.
 *
 * NOTE(review): fragmentary chunk -- the interleaved bare numbers are
 * residue of the original line numbering, and large runs of the original
 * body (local declarations, `goto`/`return` paths, closing braces) were
 * dropped by the extraction.  Comments below describe only what the
 * visible lines establish; consult upstream libuv for the full body. */
void uv__io_poll(uv_loop_t* loop, int timeout) {
53
/* Batch of change/event records exchanged with kevent() in one syscall. */
struct kevent events[1024];
70
/* Nothing is being watched: nothing to submit or wait for. */
if (loop->nfds == 0) {
71
assert(ngx_queue_empty(&loop->watcher_queue));
77
/* First pass: translate every queued watcher change into kevent
 * records, flushing the batch to the kernel whenever it fills. */
while (!ngx_queue_empty(&loop->watcher_queue)) {
78
q = ngx_queue_head(&loop->watcher_queue);
82
w = ngx_queue_data(q, uv__io_t, watcher_queue);
83
assert(w->pevents != 0);
85
assert(w->fd < (int) loop->nwatchers);
87
/* Watcher wants POLLIN but no read-side filter is armed yet. */
if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
92
/* fs-event watchers consume vnode notifications, not readable data. */
if (w->cb == uv__fs_event) {
93
filter = EVFILT_VNODE;
94
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
95
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
96
op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
99
EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
101
/* Batch full: push the changes now (no events requested back). */
if (++nevents == ARRAY_SIZE(events)) {
102
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
108
/* Watcher wants POLLOUT but no write-side filter is armed yet. */
if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
109
EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
111
if (++nevents == ARRAY_SIZE(events)) {
112
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
118
/* Record that the kernel-side filters now match the requested set. */
w->events = w->pevents;
121
assert(timeout >= -1);
123
count = 48; /* Benchmarks suggest this gives the best throughput. */
125
/* Second pass: poll and dispatch, looping while more events may be
 * pending or until the timeout budget is exhausted. */
for (;; nevents = 0) {
127
/* `timeout` is in milliseconds; kevent() takes a struct timespec. */
spec.tv_sec = timeout / 1000;
128
spec.tv_nsec = (timeout % 1000) * 1000000;
131
nfds = kevent(loop->backend_fd,
136
timeout == -1 ? NULL : &spec);
138
/* Update loop->time unconditionally. It's tempting to skip the update when
139
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
140
* operating system didn't reschedule our process while in the syscall.
142
SAVE_ERRNO(uv__update_time(loop));
145
assert(timeout != -1);
159
/* Interrupted by a signal. Update timeout and poll again. */
165
assert(loop->watchers != NULL);
166
/* Stash the event array and count in the slots past nwatchers --
 * presumably so uv__platform_invalidate_fd() can reach them while
 * callbacks run (see the "Skip invalidated events" check below). */
loop->watchers[loop->nwatchers] = (void*) events;
167
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
168
for (i = 0; i < nfds; i++) {
171
w = loop->watchers[fd];
173
/* Skip invalidated events, see uv__platform_invalidate_fd */
178
/* File descriptor that we've stopped watching, disarm it. */
180
struct kevent events[1];
182
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
183
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
184
/* EBADF/ENOENT mean the fd or filter is already gone; benign. */
if (errno != EBADF && errno != ENOENT)
190
/* Vnode (fs-event) notifications bypass the revents machinery and
 * are delivered straight to the callback with the raw NOTE_* flags. */
if (ev->filter == EVFILT_VNODE) {
191
assert(w->events == UV__POLLIN);
192
assert(w->pevents == UV__POLLIN);
193
w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
200
if (ev->filter == EVFILT_READ) {
201
if (w->pevents & UV__POLLIN) {
202
revents |= UV__POLLIN;
203
/* Per kevent(2), EVFILT_READ reports the readable byte count in data. */
w->rcount = ev->data;
206
/* Watcher no longer wants POLLIN: delete the stale read filter. */
struct kevent events[1];
207
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
208
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
214
if (ev->filter == EVFILT_WRITE) {
215
if (w->pevents & UV__POLLOUT) {
216
revents |= UV__POLLOUT;
217
w->wcount = ev->data;
220
/* Watcher no longer wants POLLOUT: delete the stale write filter. */
struct kevent events[1];
221
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
222
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
228
if (ev->flags & EV_ERROR)
229
revents |= UV__POLLERR;
234
w->cb(loop, w, revents);
237
/* Drop the stashed event array/count now that dispatch is done. */
loop->watchers[loop->nwatchers] = NULL;
238
loop->watchers[loop->nwatchers + 1] = NULL;
241
/* A completely full batch suggests more events are pending in the
 * kernel: poll again without blocking, at most `count` times. */
if (nfds == ARRAY_SIZE(events) && --count != 0) {
242
/* Poll for more events but don't block this time. */
258
/* Charge the elapsed time against the remaining timeout budget. */
diff = loop->time - base;
259
if (diff >= (uint64_t) timeout)
267
/* Handler for EVFILT_VNODE notifications on an fs-event watcher's fd:
 * maps kernel NOTE_* flags to a uv_fs_event callback, then re-arms the
 * one-shot vnode filter.
 * NOTE(review): fragmentary chunk -- the event-type mapping, early
 * returns and error handling were partly dropped by the extraction. */
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
268
uv_fs_event_t* handle;
272
/* Recover the owning handle from its embedded io watcher. */
handle = container_of(w, uv_fs_event_t, event_watcher);
274
/* Attribute/size notifications select the "change" event class. */
if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
279
handle->cb(handle, NULL, events, 0);
281
/* fd == -1 means uv__fs_event_close() ran (it sets this sentinel);
 * in that case the watcher must not be re-armed. */
if (handle->event_watcher.fd == -1)
284
/* Watcher operates in one-shot mode, re-arm it. */
285
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
286
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
288
EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
290
if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
295
/* Initialize an fs-event handle watching `filename`: open the path,
 * start the handle, and arm a kqueue vnode watcher -- or, on OS X,
 * hand directories off to the FSEvents backend.
 * NOTE(review): fragmentary chunk -- the tail of the parameter list
 * (callback, flags), local declarations and several return statements
 * were dropped by the extraction. */
int uv_fs_event_init(uv_loop_t* loop,
296
uv_fs_event_t* handle,
297
const char* filename,
300
#if defined(__APPLE__)
302
#endif /* defined(__APPLE__) */
305
/* TODO open asynchronously - but how do we report back errors? */
306
if ((fd = open(filename, O_RDONLY)) == -1) {
307
uv__set_sys_error(loop, errno);
311
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
312
uv__handle_start(handle); /* FIXME shouldn't start automatically */
313
uv__io_init(&handle->event_watcher, uv__fs_event, fd);
314
/* Private copy; released in uv__fs_event_close(). */
handle->filename = strdup(filename);
317
#if defined(__APPLE__)
318
/* Nullify field to perform checks later */
319
handle->cf_eventstream = NULL;
320
handle->realpath = NULL;
321
handle->realpath_len = 0;
322
handle->cf_flags = flags;
324
if (fstat(fd, &statbuf))
326
/* FSEvents works only with directories */
327
if (!(statbuf.st_mode & S_IFDIR))
330
/* Directory on OS X: delegate to the FSEvents implementation. */
return uv__fsevents_init(handle);
333
#endif /* defined(__APPLE__) */
335
/* Otherwise use the kqueue vnode watcher armed by uv__io_poll(). */
uv__io_start(loop, &handle->event_watcher, UV__POLLIN);
341
/* Tear down an fs-event handle: stop the io watcher, release the
 * filename copy, and close the watched fd.
 * NOTE(review): fragmentary chunk -- the function continues past the
 * end of this excerpt (the closing brace is not visible here). */
void uv__fs_event_close(uv_fs_event_t* handle) {
342
#if defined(__APPLE__)
343
/* Stops the kqueue watcher depending on uv__fsevents_close()'s result;
 * presumably this is the non-FSEvents fallback path -- confirm the
 * return-value convention against upstream libuv. */
if (uv__fsevents_close(handle))
344
uv__io_stop(handle->loop, &handle->event_watcher, UV__POLLIN);
346
uv__io_stop(handle->loop, &handle->event_watcher, UV__POLLIN);
347
#endif /* defined(__APPLE__) */
349
uv__handle_stop(handle);
351
free(handle->filename);
352
handle->filename = NULL;
354
close(handle->event_watcher.fd);
355
/* Sentinel checked by uv__fs_event() before re-arming the watcher. */
handle->event_watcher.fd = -1;