/*
   Copyright (C) 2006 John McCutchan <john@johnmccutchan.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License version 2 for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "config.h"

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <glib.h>

#include "inotify-kernel.h"

/* Just include the local headers to stop all the pain */
#include "local_inotify.h"
#include "local_inotify_syscalls.h"

#ifdef HAVE_SYS_INOTIFY_H
/* We don't actually include the libc header, because there has been
 * problems with libc versions that was built without inotify support.
 * Instead we use the local version.
 */
#include "local_inotify.h"
#include "local_inotify_syscalls.h"
#elif defined (HAVE_LINUX_INOTIFY_H)
#include <linux/inotify.h>
#include "local_inotify_syscalls.h"
#endif
/* Timings for pairing MOVED_TO / MOVED_FROM events */
#define PROCESS_EVENTS_TIME 1000 /* milliseconds (1 hz) */
#define DEFAULT_HOLD_UNTIL_TIME 0 /* 0 milliseconds */
#define MOVE_HOLD_UNTIL_TIME 0 /* 0 milliseconds */
49
static int inotify_instance_fd = -1;
50
static GQueue *events_to_process = NULL;
51
static GQueue *event_queue = NULL;
52
static GHashTable * cookie_hash = NULL;
53
static GIOChannel *inotify_read_ioc;
54
static GPollFD ik_poll_fd;
55
static gboolean ik_poll_fd_enabled = TRUE;
56
static void (*user_cb)(ik_event_t *event);
58
static gboolean ik_read_callback (gpointer user_data);
59
static gboolean ik_process_eq_callback (gpointer user_data);
61
static guint32 ik_move_matches = 0;
62
static guint32 ik_move_misses = 0;
64
static gboolean process_eq_running = FALSE;
/* We use the lock from inotify-helper.c
 *
 * There are two places that we take this lock
 *
 * 1) In ik_read_callback
 *
 * 2) ik_process_eq_callback.
 *
 * The rest of locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);
79
typedef struct ik_event_internal {
84
struct ik_event_internal *pair;
85
} ik_event_internal_t;
87
/* In order to perform non-sleeping inotify event chunking we need
91
ik_source_prepare (GSource *source,
98
ik_source_timeout (gpointer data)
100
GSource *source = (GSource *)data;
102
/* Re-active the PollFD */
103
g_source_add_poll (source, &ik_poll_fd);
104
g_source_unref (source);
105
ik_poll_fd_enabled = TRUE;
/* Tunables for the event-chunking heuristic in ik_source_check. */
#define MAX_PENDING_COUNT 2                    /* max consecutive back-offs */
#define PENDING_THRESHOLD(qsize) ((qsize) >> 1)
#define PENDING_MARGINAL_COST(p) ((unsigned int)(1 << (p)))
#define MAX_QUEUED_EVENTS 2048
/* Parenthesized so the macro is safe in any expression context. */
#define AVERAGE_EVENT_SIZE (sizeof (struct inotify_event) + 16)
#define TIMEOUT_MILLISECONDS 10
117
ik_source_check (GSource *source)
119
static int prev_pending = 0, pending_count = 0;
121
/* We already disabled the PollFD or
122
* nothing to be read from inotify */
123
if (!ik_poll_fd_enabled || !(ik_poll_fd.revents & G_IO_IN))
128
if (pending_count < MAX_PENDING_COUNT) {
129
unsigned int pending;
131
if (ioctl (inotify_instance_fd, FIONREAD, &pending) == -1)
134
pending /= AVERAGE_EVENT_SIZE;
136
/* Don't wait if the number of pending events is too close
137
* to the maximum queue size.
139
if (pending > PENDING_THRESHOLD (MAX_QUEUED_EVENTS))
142
/* With each successive iteration, the minimum rate for
143
* further sleep doubles. */
144
if (pending-prev_pending < PENDING_MARGINAL_COST(pending_count))
147
prev_pending = pending;
150
/* We are going to wait to read the events: */
152
/* Remove the PollFD from the source */
153
g_source_remove_poll (source, &ik_poll_fd);
154
/* To avoid threading issues we need to flag that we've done that */
155
ik_poll_fd_enabled = FALSE;
156
/* Set a timeout to re-add the PollFD to the source */
157
g_source_ref (source);
158
g_timeout_add (TIMEOUT_MILLISECONDS, ik_source_timeout, source);
164
/* We are ready to read events from inotify */
173
ik_source_dispatch (GSource *source,
174
GSourceFunc callback,
179
return callback(user_data);
184
GSourceFuncs ik_source_funcs =
192
/* Initialize the inotify kernel layer: create the inotify instance,
 * wrap it in a non-blocking GIOChannel, attach the custom chunking
 * GSource, and allocate the event queues.  `cb` is invoked for every
 * delivered ik_event_t.  Safe to call repeatedly; subsequent calls
 * only report whether the first initialization succeeded. */
gboolean ik_startup (void (*cb)(ik_event_t *event))
{
  static gboolean initialized = FALSE;
  GSource *source;

  user_cb = cb;
  /* Ignore multi-calls */
  if (initialized)
    return inotify_instance_fd >= 0;

  initialized = TRUE;
  inotify_instance_fd = inotify_init ();

  if (inotify_instance_fd < 0) {
    return FALSE;
  }

  inotify_read_ioc = g_io_channel_unix_new(inotify_instance_fd);
  ik_poll_fd.fd = inotify_instance_fd;
  ik_poll_fd.events = G_IO_IN | G_IO_HUP | G_IO_ERR;
  /* Raw binary data: no encoding, and never block the main loop. */
  g_io_channel_set_encoding(inotify_read_ioc, NULL, NULL);
  g_io_channel_set_flags(inotify_read_ioc, G_IO_FLAG_NONBLOCK, NULL);

  source = g_source_new (&ik_source_funcs, sizeof(GSource));
  g_source_add_poll (source, &ik_poll_fd);
  g_source_set_callback(source, ik_read_callback, NULL, NULL);
  g_source_attach(source, NULL);
  g_source_unref (source);

  cookie_hash = g_hash_table_new(g_direct_hash, g_direct_equal);
  event_queue = g_queue_new ();
  events_to_process = g_queue_new ();

  return TRUE;
}
229
static ik_event_internal_t *ik_event_internal_new (ik_event_t *event)
231
ik_event_internal_t *internal_event = g_new0(ik_event_internal_t, 1);
236
g_get_current_time (&tv);
237
g_time_val_add (&tv, DEFAULT_HOLD_UNTIL_TIME);
238
internal_event->event = event;
239
internal_event->hold_until = tv;
241
return internal_event;
244
static ik_event_t *ik_event_new (char *buffer)
246
struct inotify_event *kevent = (struct inotify_event *)buffer;
248
ik_event_t *event = g_new0(ik_event_t,1);
249
event->wd = kevent->wd;
250
event->mask = kevent->mask;
251
event->cookie = kevent->cookie;
252
event->len = kevent->len;
254
event->name = g_strdup(kevent->name);
256
event->name = g_strdup("");
261
ik_event_t *ik_event_new_dummy (const char *name, gint32 wd, guint32 mask)
263
ik_event_t *event = g_new0(ik_event_t,1);
268
event->name = g_strdup(name);
270
event->name = g_strdup("");
272
event->len = strlen (event->name);
277
void ik_event_free (ik_event_t *event)
280
ik_event_free (event->pair);
285
gint32 ik_watch (const char *path, guint32 mask, int *err)
289
g_assert (path != NULL);
290
g_assert (inotify_instance_fd >= 0);
292
wd = inotify_add_watch (inotify_instance_fd, path, mask);
297
// FIXME: debug msg failed to add watch
307
int ik_ignore(const char *path, gint32 wd)
310
g_assert (inotify_instance_fd >= 0);
312
if (inotify_rm_watch (inotify_instance_fd, wd) < 0)
315
// failed to rm watch
322
void ik_move_stats (guint32 *matches, guint32 *misses)
325
*matches = ik_move_matches;
328
*misses = ik_move_misses;
331
const char *ik_mask_to_string (guint32 mask)
333
gboolean is_dir = mask & IN_ISDIR;
341
return "ACCESS (dir)";
344
return "MODIFY (dir)";
347
return "ATTRIB (dir)";
350
return "CLOSE_WRITE (dir)";
352
case IN_CLOSE_NOWRITE:
353
return "CLOSE_NOWRITE (dir)";
359
return "MOVED_FROM (dir)";
362
return "MOVED_TO (dir)";
365
return "DELETE (dir)";
368
return "CREATE (dir)";
371
return "DELETE_SELF (dir)";
374
return "UNMOUNT (dir)";
377
return "Q_OVERFLOW (dir)";
380
return "IGNORED (dir)";
383
return "UNKNOWN_EVENT (dir)";
400
return "CLOSE_WRITE";
402
case IN_CLOSE_NOWRITE:
403
return "CLOSE_NOWRITE";
421
return "DELETE_SELF";
433
return "UNKNOWN_EVENT";
441
static void ik_read_events (gsize *buffer_size_out, gchar **buffer_out)
443
static gchar *buffer = NULL;
444
static gsize buffer_size;
446
/* Initialize the buffer on our first call */
449
buffer_size = AVERAGE_EVENT_SIZE;
450
buffer_size *= MAX_QUEUED_EVENTS;
451
buffer = g_malloc (buffer_size);
454
*buffer_size_out = 0;
460
*buffer_size_out = 0;
463
memset(buffer, 0, buffer_size);
465
if (g_io_channel_read_chars (inotify_read_ioc, (char *)buffer, buffer_size, buffer_size_out, NULL) != G_IO_STATUS_NORMAL) {
468
*buffer_out = buffer;
471
static gboolean ik_read_callback(gpointer user_data)
474
gsize buffer_size, buffer_i, events;
476
G_LOCK(inotify_lock);
477
ik_read_events (&buffer_size, &buffer);
481
while (buffer_i < buffer_size)
483
struct inotify_event *event;
485
event = (struct inotify_event *)&buffer[buffer_i];
486
event_size = sizeof(struct inotify_event) + event->len;
487
g_queue_push_tail (events_to_process, ik_event_internal_new (ik_event_new (&buffer[buffer_i])));
488
buffer_i += event_size;
492
/* If the event process callback is off, turn it back on */
493
if (!process_eq_running && events)
495
process_eq_running = TRUE;
496
g_timeout_add (PROCESS_EVENTS_TIME, ik_process_eq_callback, NULL);
499
G_UNLOCK(inotify_lock);
504
g_timeval_lt(GTimeVal *val1, GTimeVal *val2)
506
if (val1->tv_sec < val2->tv_sec)
509
if (val1->tv_sec > val2->tv_sec)
512
/* val1->tv_sec == val2->tv_sec */
513
if (val1->tv_usec < val2->tv_usec)
520
g_timeval_eq(GTimeVal *val1, GTimeVal *val2)
522
return (val1->tv_sec == val2->tv_sec) && (val1->tv_usec == val2->tv_usec);
526
ik_pair_events (ik_event_internal_t *event1, ik_event_internal_t *event2)
528
g_assert (event1 && event2);
529
/* We should only be pairing events that have the same cookie */
530
g_assert (event1->event->cookie == event2->event->cookie);
531
/* We shouldn't pair an event that already is paired */
532
g_assert (event1->pair == NULL && event2->pair == NULL);
534
/* Pair the internal structures and the ik_event_t structures */
535
event1->pair = event2;
536
event1->event->pair = event2->event;
538
if (g_timeval_lt (&event1->hold_until, &event2->hold_until))
539
event1->hold_until = event2->hold_until;
541
event2->hold_until = event1->hold_until;
545
ik_event_add_microseconds (ik_event_internal_t *event, glong ms)
548
g_time_val_add (&event->hold_until, ms);
552
ik_event_ready (ik_event_internal_t *event)
557
g_get_current_time (&tv);
559
/* An event is ready if,
561
* it has no cookie -- there is nothing to be gained by holding it
562
* or, it is already paired -- we don't need to hold it anymore
563
* or, we have held it long enough
565
return event->event->cookie == 0 ||
566
event->pair != NULL ||
567
g_timeval_lt(&event->hold_until, &tv) || g_timeval_eq(&event->hold_until, &tv);
571
ik_pair_moves (gpointer data, gpointer user_data)
573
ik_event_internal_t *event = (ik_event_internal_t *)data;
575
if (event->seen == TRUE || event->sent == TRUE)
578
if (event->event->cookie != 0)
580
/* When we get a MOVED_FROM event we delay sending the event by
581
* MOVE_HOLD_UNTIL_TIME microseconds. We need to do this because a
582
* MOVED_TO pair _might_ be coming in the near future */
583
if (event->event->mask & IN_MOVED_FROM) {
584
g_hash_table_insert (cookie_hash, GINT_TO_POINTER(event->event->cookie), event);
585
// because we don't deliver move events there is no point in waiting for the match right now.
586
ik_event_add_microseconds (event, MOVE_HOLD_UNTIL_TIME);
587
} else if (event->event->mask & IN_MOVED_TO) {
588
/* We need to check if we are waiting for this MOVED_TO events cookie to pair it with
590
ik_event_internal_t *match = NULL;
591
match = g_hash_table_lookup (cookie_hash, GINT_TO_POINTER(event->event->cookie));
593
g_hash_table_remove (cookie_hash, GINT_TO_POINTER(event->event->cookie));
594
ik_pair_events (match, event);
604
g_queue_foreach (events_to_process, ik_pair_moves, NULL);
606
while (!g_queue_is_empty (events_to_process))
608
ik_event_internal_t *event = g_queue_peek_head (events_to_process);
610
/* This must have been sent as part of a MOVED_TO/MOVED_FROM */
614
g_queue_pop_head (events_to_process);
615
/* Free the internal event structure */
620
/* The event isn't ready yet */
621
if (!ik_event_ready (event)) {
626
event = g_queue_pop_head (events_to_process);
628
/* Check if this is a MOVED_FROM that is also sitting in the cookie_hash */
629
if (event->event->cookie && event->pair == NULL &&
630
g_hash_table_lookup (cookie_hash, GINT_TO_POINTER(event->event->cookie)))
632
g_hash_table_remove (cookie_hash, GINT_TO_POINTER(event->event->cookie));
636
/* We send out paired MOVED_FROM/MOVED_TO events in the same event buffer */
637
//g_assert (event->event->mask == IN_MOVED_FROM && event->pair->event->mask == IN_MOVED_TO);
638
/* Copy the paired data */
639
event->pair->sent = TRUE;
642
} else if (event->event->cookie) {
643
/* If we couldn't pair a MOVED_FROM and MOVED_TO together, we change
645
/* Changeing MOVED_FROM to DELETE and MOVED_TO to create lets us make
646
* the gaurantee that you will never see a non-matched MOVE event */
648
if (event->event->mask & IN_MOVED_FROM) {
649
event->event->mask = IN_DELETE|(event->event->mask & IN_ISDIR);
650
ik_move_misses++; // not super accurate, if we aren't watching the destination it still counts as a miss
652
if (event->event->mask & IN_MOVED_TO)
653
event->event->mask = IN_CREATE|(event->event->mask & IN_ISDIR);
656
/* Push the ik_event_t onto the event queue */
657
g_queue_push_tail (event_queue, event->event);
658
/* Free the internal event structure */
663
gboolean ik_process_eq_callback (gpointer user_data)
665
/* Try and move as many events to the event queue */
666
G_LOCK(inotify_lock);
667
ik_process_events ();
669
while (!g_queue_is_empty (event_queue))
671
ik_event_t *event = g_queue_pop_head (event_queue);
676
if (g_queue_get_length (events_to_process) == 0)
678
process_eq_running = FALSE;
679
G_UNLOCK(inotify_lock);
682
G_UNLOCK(inotify_lock);