1
Subject: wait-simple: Simple waitqueue implementation
2
From: Thomas Gleixner <tglx@linutronix.de>
3
Date: Mon, 12 Dec 2011 12:29:04 +0100
5
wait_queue is a swiss army knife and in most of the cases the
6
complexity is not needed. For RT waitqueues are a constant source of
7
trouble as we can't convert the head lock to a raw spinlock due to
8
fancy and long lasting callbacks.
10
Provide a slim version, which allows RT to replace wait queues. This
11
should go mainline as well, as it lowers memory consumption and
runtime overhead.
14
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
16
include/linux/wait-simple.h | 152 ++++++++++++++++++++++++++++++++++++++++++++
18
kernel/Makefile             |    2 +-
 kernel/wait-simple.c        |   63 ++++++++++++++++++
19
3 files changed, 216 insertions(+), 1 deletion(-)
21
Index: linux-3.2/include/linux/wait-simple.h
22
===================================================================
24
+++ linux-3.2/include/linux/wait-simple.h
26
+#ifndef _LINUX_WAIT_SIMPLE_H
27
+#define _LINUX_WAIT_SIMPLE_H
29
+#include <linux/spinlock.h>
30
+#include <linux/list.h>
32
+#include <asm/current.h>
35
+ struct task_struct *task;
36
+ struct list_head node;
39
+#define DEFINE_SWAITER(name) \
40
+ struct swaiter name = { \
42
+ .node = LIST_HEAD_INIT((name).node), \
46
+ raw_spinlock_t lock;
47
+ struct list_head list;
50
+#define DEFINE_SWAIT_HEAD(name) \
51
+ struct swait_head name = { \
52
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
53
+ .list = LIST_HEAD_INIT((name).list), \
56
+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
58
+#define init_swait_head(swh) \
60
+ static struct lock_class_key __key; \
62
+ __init_swait_head((swh), &__key); \
68
+static inline bool swaiter_enqueued(struct swaiter *w)
70
+ return w->task != NULL;
73
+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
74
+extern void swait_finish(struct swait_head *head, struct swaiter *w);
77
+ * Adds w to head->list. Must be called with head->lock locked.
79
+static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
81
+ list_add(&w->node, &head->list);
85
+ * Removes w from head->list. Must be called with head->lock locked.
87
+static inline void __swait_dequeue(struct swaiter *w)
89
+ list_del_init(&w->node);
95
+extern void __swait_wake(struct swait_head *head, unsigned int state);
97
+static inline void swait_wake(struct swait_head *head)
99
+ __swait_wake(head, TASK_NORMAL);
106
+#define __swait_event(wq, condition) \
108
+ DEFINE_SWAITER(__wait); \
111
+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
116
+ swait_finish(&wq, &__wait); \
120
+ * swait_event - sleep until a condition gets true
121
+ * @wq: the waitqueue to wait on
122
+ * @condition: a C expression for the event to wait for
124
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
125
+ * @condition evaluates to true. The @condition is checked each time
126
+ * the waitqueue @wq is woken up.
128
+ * wake_up() has to be called after changing any variable that could
129
+ * change the result of the wait condition.
131
+#define swait_event(wq, condition) \
135
+ __swait_event(wq, condition); \
138
+#define __swait_event_timeout(wq, condition, ret) \
140
+ DEFINE_SWAITER(__wait); \
143
+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
146
+ ret = schedule_timeout(ret); \
150
+ swait_finish(&wq, &__wait); \
154
+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
155
+ * @wq: the waitqueue to wait on
156
+ * @condition: a C expression for the event to wait for
157
+ * @timeout: timeout, in jiffies
159
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
160
+ * @condition evaluates to true. The @condition is checked each time
161
+ * the waitqueue @wq is woken up.
163
+ * wake_up() has to be called after changing any variable that could
164
+ * change the result of the wait condition.
166
+ * The function returns 0 if the @timeout elapsed, and the remaining
167
+ * jiffies if the condition evaluated to true before the timeout elapsed.
169
+#define swait_event_timeout(wq, condition, timeout) \
171
+ long __ret = timeout; \
172
+ if (!(condition)) \
173
+ __swait_event_timeout(wq, condition, __ret); \
178
Index: linux-3.2/kernel/Makefile
179
===================================================================
180
--- linux-3.2.orig/kernel/Makefile
181
+++ linux-3.2/kernel/Makefile
182
@@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o
183
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
184
hrtimer.o nsproxy.o srcu.o semaphore.o \
185
notifier.o ksysfs.o sched_clock.o cred.o \
187
+ async.o range.o wait-simple.o
190
ifdef CONFIG_FUNCTION_TRACER
191
Index: linux-3.2/kernel/wait-simple.c
192
===================================================================
194
+++ linux-3.2/kernel/wait-simple.c
197
+ * Simple waitqueues without fancy flags and callbacks
199
+ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
201
+ * Based on kernel/wait.c
203
+ * For licencing details see kernel-base/COPYING
205
+#include <linux/init.h>
206
+#include <linux/export.h>
207
+#include <linux/sched.h>
208
+#include <linux/wait-simple.h>
210
+void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
212
+ raw_spin_lock_init(&head->lock);
213
+ lockdep_set_class(&head->lock, key);
214
+ INIT_LIST_HEAD(&head->list);
216
+EXPORT_SYMBOL_GPL(__init_swait_head);
218
+void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
220
+ unsigned long flags;
222
+ raw_spin_lock_irqsave(&head->lock, flags);
224
+ __swait_enqueue(head, w);
225
+ set_current_state(state);
226
+ raw_spin_unlock_irqrestore(&head->lock, flags);
228
+EXPORT_SYMBOL_GPL(swait_prepare);
230
+void swait_finish(struct swait_head *head, struct swaiter *w)
232
+ unsigned long flags;
234
+ __set_current_state(TASK_RUNNING);
236
+ raw_spin_lock_irqsave(&head->lock, flags);
237
+ __swait_dequeue(w);
238
+ raw_spin_unlock_irqrestore(&head->lock, flags);
241
+EXPORT_SYMBOL_GPL(swait_finish);
243
+void __swait_wake(struct swait_head *head, unsigned int state)
245
+ struct swaiter *curr, *next;
246
+ unsigned long flags;
248
+ raw_spin_lock_irqsave(&head->lock, flags);
250
+ list_for_each_entry_safe(curr, next, &head->list, node) {
251
+ if (wake_up_state(curr->task, state)) {
252
+ __swait_dequeue(curr);
257
+ raw_spin_unlock_irqrestore(&head->lock, flags);