#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
 * The per-CPU workqueue (if single thread, we always use the first
#include <linux/idr.h>
#include "workqueue_sched.h"
48
/* global_cwq flags */
49
GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
50
GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */
51
GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
52
GCWQ_FREEZING = 1 << 3, /* freeze in progress */
53
GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */
56
WORKER_STARTED = 1 << 0, /* started */
57
WORKER_DIE = 1 << 1, /* die die die */
58
WORKER_IDLE = 1 << 2, /* is idle */
59
WORKER_PREP = 1 << 3, /* preparing to run works */
60
WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
61
WORKER_REBIND = 1 << 5, /* mom is home, come back */
62
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
63
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
65
WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
66
WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
68
/* gcwq->trustee_state */
69
TRUSTEE_START = 0, /* start */
70
TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
71
TRUSTEE_BUTCHER = 2, /* butcher workers */
72
TRUSTEE_RELEASE = 3, /* release workers */
73
TRUSTEE_DONE = 4, /* trustee is done */
75
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
76
BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
77
BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
79
MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
80
IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
82
MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
				/* call for help after 10ms (min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
CREATE_COOLDOWN = HZ, /* time to breathe after fail */
87
TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
90
* Rescue workers are used only on emergencies and shared by
93
RESCUER_NICE_LEVEL = -20,
97
* Structure fields follow one of the following exclusion rules.
99
* I: Modifiable by initialization/destruction paths and read-only for
102
* P: Preemption protected. Disabling preemption is enough and should
103
* only be modified and accessed from the local cpu.
105
* L: gcwq->lock protected. Access with gcwq->lock held.
107
* X: During normal operation, modification requires gcwq->lock and
108
* should be done only from local cpu. Either disabling preemption
109
* on local cpu or grabbing gcwq->lock is enough for read access.
110
* If GCWQ_DISASSOCIATED is set, it's identical to L.
112
* F: wq->flush_mutex protected.
114
* W: workqueue_lock protected.
120
* The poor guys doing the actual heavy lifting. All on-duty workers
121
* are either serving the manager role, on idle list or on busy hash.
124
/* on idle list while idle, on busy hash table while busy */
126
struct list_head entry; /* L: while idle */
127
struct hlist_node hentry; /* L: while busy */
130
struct work_struct *current_work; /* L: work being processed */
131
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
132
struct list_head scheduled; /* L: scheduled works */
133
struct task_struct *task; /* I: worker task */
134
struct global_cwq *gcwq; /* I: the associated gcwq */
135
/* 64 bytes boundary on 64bit, 32 on 32bit */
136
unsigned long last_active; /* L: last active timestamp */
137
unsigned int flags; /* X: flags */
138
int id; /* I: worker id */
139
struct work_struct rebind_work; /* L: rebind worker to cpu */
143
* Global per-cpu workqueue. There's one and only one for each cpu
144
* and all works are queued and processed here regardless of their
148
spinlock_t lock; /* the gcwq lock */
149
struct list_head worklist; /* L: list of pending works */
150
unsigned int cpu; /* I: the associated cpu */
151
unsigned int flags; /* L: GCWQ_* flags */
153
int nr_workers; /* L: total number of workers */
154
int nr_idle; /* L: currently idle ones */
156
/* workers are chained either in the idle_list or busy_hash */
157
struct list_head idle_list; /* X: list of idle workers */
158
struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
159
/* L: hash of busy workers */
161
struct timer_list idle_timer; /* L: worker idle timeout */
162
struct timer_list mayday_timer; /* L: SOS timer for workers */
164
struct ida worker_ida; /* L: for worker IDs */
166
struct task_struct *trustee; /* L: for gcwq shutdown */
167
unsigned int trustee_state; /* L: trustee state */
168
wait_queue_head_t trustee_wait; /* trustee wait */
169
struct worker *first_idle; /* L: first idle worker */
170
} ____cacheline_aligned_in_smp;
173
* The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
174
* work_struct->data are used for flags and thus cwqs need to be
175
* aligned at two's power of the number of flag bits.
43
177
struct cpu_workqueue_struct {
47
struct list_head worklist;
48
wait_queue_head_t more_work;
49
struct work_struct *current_work;
51
struct workqueue_struct *wq;
52
struct task_struct *thread;
53
} ____cacheline_aligned;
178
struct global_cwq *gcwq; /* I: the associated gcwq */
179
struct workqueue_struct *wq; /* I: the owning workqueue */
180
int work_color; /* L: current color */
181
int flush_color; /* L: flushing color */
182
int nr_in_flight[WORK_NR_COLORS];
183
/* L: nr of in_flight works */
184
int nr_active; /* L: nr of active works */
185
int max_active; /* L: max active works */
186
struct list_head delayed_works; /* L: delayed works */
190
* Structure used to wait for workqueue flush.
193
struct list_head list; /* F: list of flushers */
194
int flush_color; /* F: flush color waiting for */
195
struct completion done; /* flush completion */
199
* All cpumasks are assumed to be always set on UP and thus can't be
200
* used to determine whether there's something to be done.
203
typedef cpumask_var_t mayday_mask_t;
204
#define mayday_test_and_set_cpu(cpu, mask) \
205
cpumask_test_and_set_cpu((cpu), (mask))
206
#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
207
#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
208
#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
209
#define free_mayday_mask(mask) free_cpumask_var((mask))
211
typedef unsigned long mayday_mask_t;
212
#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
213
#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
214
#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
215
#define alloc_mayday_mask(maskp, gfp) true
216
#define free_mayday_mask(mask) do { } while (0)
56
220
* The externally visible workqueue abstraction is an array of
57
221
* per-CPU workqueues:
59
223
struct workqueue_struct {
60
struct cpu_workqueue_struct *cpu_wq;
61
struct list_head list;
64
int freezeable; /* Freeze threads during suspend */
224
unsigned int flags; /* I: WQ_* flags */
226
struct cpu_workqueue_struct __percpu *pcpu;
227
struct cpu_workqueue_struct *single;
229
} cpu_wq; /* I: cwq's */
230
struct list_head list; /* W: list of all workqueues */
232
struct mutex flush_mutex; /* protects wq flushing */
233
int work_color; /* F: current work color */
234
int flush_color; /* F: current flush color */
235
atomic_t nr_cwqs_to_flush; /* flush in progress */
236
struct wq_flusher *first_flusher; /* F: first flusher */
237
struct list_head flusher_queue; /* F: flush waiters */
238
struct list_head flusher_overflow; /* F: flush overflow list */
240
mayday_mask_t mayday_mask; /* cpus requesting rescue */
241
struct worker *rescuer; /* I: rescue worker */
243
int saved_max_active; /* W: saved cwq max_active */
244
const char *name; /* I: workqueue name */
66
245
#ifdef CONFIG_LOCKDEP
67
struct lockdep_map lockdep_map;
246
struct lockdep_map lockdep_map;
250
struct workqueue_struct *system_wq __read_mostly;
251
struct workqueue_struct *system_long_wq __read_mostly;
252
struct workqueue_struct *system_nrt_wq __read_mostly;
253
struct workqueue_struct *system_unbound_wq __read_mostly;
254
EXPORT_SYMBOL_GPL(system_wq);
255
EXPORT_SYMBOL_GPL(system_long_wq);
256
EXPORT_SYMBOL_GPL(system_nrt_wq);
257
EXPORT_SYMBOL_GPL(system_unbound_wq);
259
#define CREATE_TRACE_POINTS
260
#include <trace/events/workqueue.h>
262
#define for_each_busy_worker(worker, i, pos, gcwq) \
263
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
264
hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
266
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
269
if (cpu < nr_cpu_ids) {
271
cpu = cpumask_next(cpu, mask);
272
if (cpu < nr_cpu_ids)
276
return WORK_CPU_UNBOUND;
278
return WORK_CPU_NONE;
281
static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
282
struct workqueue_struct *wq)
284
return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
290
* An extra gcwq is defined for an invalid cpu number
291
* (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
292
* specific CPU. The following iterators are similar to
293
* for_each_*_cpu() iterators but also considers the unbound gcwq.
295
* for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
296
* for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
297
* for_each_cwq_cpu() : possible CPUs for bound workqueues,
298
* WORK_CPU_UNBOUND for unbound workqueues
300
#define for_each_gcwq_cpu(cpu) \
301
for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
302
(cpu) < WORK_CPU_NONE; \
303
(cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
305
#define for_each_online_gcwq_cpu(cpu) \
306
for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
307
(cpu) < WORK_CPU_NONE; \
308
(cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
310
#define for_each_cwq_cpu(cpu, wq) \
311
for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
312
(cpu) < WORK_CPU_NONE; \
313
(cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
315
#ifdef CONFIG_DEBUG_OBJECTS_WORK
317
static struct debug_obj_descr work_debug_descr;
320
* fixup_init is called when:
321
* - an active object is initialized
323
static int work_fixup_init(void *addr, enum debug_obj_state state)
325
struct work_struct *work = addr;
328
case ODEBUG_STATE_ACTIVE:
329
cancel_work_sync(work);
330
debug_object_init(work, &work_debug_descr);
338
* fixup_activate is called when:
339
* - an active object is activated
340
* - an unknown object is activated (might be a statically initialized object)
342
static int work_fixup_activate(void *addr, enum debug_obj_state state)
344
struct work_struct *work = addr;
348
case ODEBUG_STATE_NOTAVAILABLE:
350
* This is not really a fixup. The work struct was
351
* statically initialized. We just make sure that it
352
* is tracked in the object tracker.
354
if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
355
debug_object_init(work, &work_debug_descr);
356
debug_object_activate(work, &work_debug_descr);
362
case ODEBUG_STATE_ACTIVE:
371
* fixup_free is called when:
372
* - an active object is freed
374
static int work_fixup_free(void *addr, enum debug_obj_state state)
376
struct work_struct *work = addr;
379
case ODEBUG_STATE_ACTIVE:
380
cancel_work_sync(work);
381
debug_object_free(work, &work_debug_descr);
388
static struct debug_obj_descr work_debug_descr = {
389
.name = "work_struct",
390
.fixup_init = work_fixup_init,
391
.fixup_activate = work_fixup_activate,
392
.fixup_free = work_fixup_free,
395
static inline void debug_work_activate(struct work_struct *work)
397
debug_object_activate(work, &work_debug_descr);
400
static inline void debug_work_deactivate(struct work_struct *work)
402
debug_object_deactivate(work, &work_debug_descr);
405
void __init_work(struct work_struct *work, int onstack)
408
debug_object_init_on_stack(work, &work_debug_descr);
410
debug_object_init(work, &work_debug_descr);
412
EXPORT_SYMBOL_GPL(__init_work);
414
void destroy_work_on_stack(struct work_struct *work)
416
debug_object_free(work, &work_debug_descr);
418
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
421
static inline void debug_work_activate(struct work_struct *work) { }
422
static inline void debug_work_deactivate(struct work_struct *work) { }
71
425
/* Serializes the accesses to the list of workqueues. */
72
426
static DEFINE_SPINLOCK(workqueue_lock);
73
427
static LIST_HEAD(workqueues);
75
static int singlethread_cpu __read_mostly;
76
static const struct cpumask *cpu_singlethread_map __read_mostly;
78
* _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
79
* flushes cwq->worklist. This means that flush_workqueue/wait_on_work
80
* which comes in between can't use for_each_online_cpu(). We could
81
* use cpu_possible_map, the cpumask below is more a documentation
84
static cpumask_var_t cpu_populated_map __read_mostly;
86
/* If it's single threaded, it isn't in the list of workqueues. */
87
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
89
return wq->singlethread;
92
static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
94
return is_wq_single_threaded(wq)
95
? cpu_singlethread_map : cpu_populated_map;
99
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
101
if (unlikely(is_wq_single_threaded(wq)))
102
cpu = singlethread_cpu;
103
return per_cpu_ptr(wq->cpu_wq, cpu);
107
* Set the workqueue on which a work item is to be run
108
* - Must *only* be called if the pending flag is set
110
static inline void set_wq_data(struct work_struct *work,
111
struct cpu_workqueue_struct *cwq)
428
static bool workqueue_freezing; /* W: have wqs started freezing? */
431
* The almighty global cpu workqueues. nr_running is the only field
432
* which is expected to be used frequently by other cpus via
433
* try_to_wake_up(). Put it in a separate cacheline.
435
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
436
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
439
* Global cpu workqueue and nr_running counter for unbound gcwq. The
440
* gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
441
* workers have WORKER_UNBOUND set.
443
static struct global_cwq unbound_global_cwq;
444
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */
446
static int worker_thread(void *__worker);
448
static struct global_cwq *get_gcwq(unsigned int cpu)
450
if (cpu != WORK_CPU_UNBOUND)
451
return &per_cpu(global_cwq, cpu);
453
return &unbound_global_cwq;
456
static atomic_t *get_gcwq_nr_running(unsigned int cpu)
458
if (cpu != WORK_CPU_UNBOUND)
459
return &per_cpu(gcwq_nr_running, cpu);
461
return &unbound_gcwq_nr_running;
464
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
465
struct workqueue_struct *wq)
467
if (!(wq->flags & WQ_UNBOUND)) {
468
if (likely(cpu < nr_cpu_ids)) {
470
return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
472
return wq->cpu_wq.single;
475
} else if (likely(cpu == WORK_CPU_UNBOUND))
476
return wq->cpu_wq.single;
480
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
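/*
 * Illustrative aside (not kernel code): a standalone userspace sketch of
 * how a work color round-trips through the helpers above.  The shift and
 * width values below are assumptions standing in for WORK_STRUCT_COLOR_SHIFT
 * and WORK_STRUCT_COLOR_BITS from workqueue.h; only the packing scheme
 * matters here.
 */
#include <assert.h>

#define SKETCH_COLOR_SHIFT	4	/* assumed shift */
#define SKETCH_COLOR_BITS	4	/* assumed width */
#define SKETCH_NR_COLORS	((1 << SKETCH_COLOR_BITS) - 1)

static unsigned long sketch_color_to_flags(int color)
{
	return (unsigned long)color << SKETCH_COLOR_SHIFT;
}

static int sketch_get_color(unsigned long data)
{
	return (data >> SKETCH_COLOR_SHIFT) & ((1 << SKETCH_COLOR_BITS) - 1);
}

static int sketch_next_color(int color)
{
	return (color + 1) % SKETCH_NR_COLORS;
}

int main(void)
{
	int color;

	/* every valid color survives the pack/unpack round trip */
	for (color = 0; color < SKETCH_NR_COLORS; color++)
		assert(sketch_get_color(sketch_color_to_flags(color)) == color);

	/* the color after the last one wraps back to 0 */
	assert(sketch_next_color(SKETCH_NR_COLORS - 1) == 0);
	return 0;
}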
497
* A work's data points to the cwq with WORK_STRUCT_CWQ set while the
498
* work is on queue. Once execution starts, WORK_STRUCT_CWQ is
499
* cleared and the work data contains the cpu number it was last on.
501
* set_work_{cwq|cpu}() and clear_work_data() can be used to set the
502
* cwq, cpu or clear work->data. These functions should only be
503
* called while the work is owned - ie. while the PENDING bit is set.
505
* get_work_[g]cwq() can be used to obtain the gcwq or cwq
506
* corresponding to a work. gcwq is available once the work has been
507
* queued anywhere after initialization. cwq is available only from
508
* queueing until execution starts.
510
static inline void set_work_data(struct work_struct *work, unsigned long data,
115
513
BUG_ON(!work_pending(work));
117
new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
118
new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
119
atomic_long_set(&work->data, new);
123
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
125
return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
514
atomic_long_set(&work->data, data | flags | work_static(work));
517
static void set_work_cwq(struct work_struct *work,
518
struct cpu_workqueue_struct *cwq,
519
unsigned long extra_flags)
521
set_work_data(work, (unsigned long)cwq,
522
WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
525
static void set_work_cpu(struct work_struct *work, unsigned int cpu)
527
set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
530
static void clear_work_data(struct work_struct *work)
532
set_work_data(work, WORK_STRUCT_NO_CPU, 0);
535
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
537
unsigned long data = atomic_long_read(&work->data);
539
if (data & WORK_STRUCT_CWQ)
540
return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
545
static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
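/*
 * Illustrative aside (not kernel code): a standalone userspace sketch of
 * the work->data encoding described above.  While queued, data carries the
 * cwq pointer with WORK_STRUCT_CWQ set; after set_work_cpu() the same word
 * carries the last CPU number shifted past the flag bits.  The bit
 * positions below are assumptions standing in for the real WORK_STRUCT_*
 * constants.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_PENDING		(1UL << 0)	/* assumed WORK_STRUCT_PENDING */
#define SKETCH_CWQ		(1UL << 1)	/* assumed WORK_STRUCT_CWQ */
#define SKETCH_FLAG_BITS	8		/* assumed WORK_STRUCT_FLAG_BITS */
#define SKETCH_FLAG_MASK	((1UL << SKETCH_FLAG_BITS) - 1)

static uintptr_t sketch_encode_cwq(uintptr_t cwq)
{
	/* cwq is aligned, so its low flag bits are zero and can carry flags */
	return cwq | SKETCH_PENDING | SKETCH_CWQ;
}

static uintptr_t sketch_encode_cpu(unsigned int cpu)
{
	return ((uintptr_t)cpu << SKETCH_FLAG_BITS) | SKETCH_PENDING;
}

int main(void)
{
	uintptr_t cwq = 0x1000;			/* suitably aligned fake cwq pointer */
	uintptr_t data = sketch_encode_cwq(cwq);

	/* while queued, the cwq pointer is recoverable from the data word */
	assert(data & SKETCH_CWQ);
	assert((data & ~SKETCH_FLAG_MASK) == cwq);

	/* after set_work_cpu(), only the last cpu number remains */
	data = sketch_encode_cpu(3);
	assert(!(data & SKETCH_CWQ));
	assert((data >> SKETCH_FLAG_BITS) == 3);
	return 0;
}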
563
* Policy functions. These define the policies on how the global
564
* worker pool is managed. Unless noted otherwise, these functions
565
* assume that they're being called with gcwq->lock held.
568
static bool __need_more_worker(struct global_cwq *gcwq)
570
return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
571
gcwq->flags & GCWQ_HIGHPRI_PENDING;
575
* Need to wake up a worker? Called from anything but currently
578
static bool need_more_worker(struct global_cwq *gcwq)
580
return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
583
/* Can I start working? Called from busy but !running workers. */
584
static bool may_start_working(struct global_cwq *gcwq)
586
return gcwq->nr_idle;
589
/* Do I need to keep working? Called from currently running workers. */
590
static bool keep_working(struct global_cwq *gcwq)
592
atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
594
return !list_empty(&gcwq->worklist) &&
595
(atomic_read(nr_running) <= 1 ||
596
gcwq->flags & GCWQ_HIGHPRI_PENDING);
599
/* Do we need a new worker? Called from manager. */
600
static bool need_to_create_worker(struct global_cwq *gcwq)
602
return need_more_worker(gcwq) && !may_start_working(gcwq);
605
/* Do I need to be the manager? */
606
static bool need_to_manage_workers(struct global_cwq *gcwq)
608
return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
611
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
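/*
 * Illustrative aside (not kernel code): a standalone userspace sketch of
 * the idle-worker cap above.  With MAX_IDLE_WORKERS_RATIO == 4, two idle
 * workers are always tolerated and every further idle worker must be
 * backed by four busy ones.
 */
#include <assert.h>
#include <stdbool.h>

static bool sketch_too_many(int nr_idle, int nr_busy)
{
	return nr_idle > 2 && (nr_idle - 2) * 4 >= nr_busy;
}

int main(void)
{
	assert(!sketch_too_many(2, 0));	/* two idle, nothing busy: fine */
	assert(sketch_too_many(3, 0));	/* a third idle worker with nothing busy: too many */
	assert(!sketch_too_many(3, 5));	/* third idle worker backed by >4 busy: fine */
	assert(sketch_too_many(4, 7));	/* (4 - 2) * 4 = 8 >= 7 busy: too many */
	return 0;
}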
625
/* Return the first worker. Safe with preemption disabled */
626
static struct worker *first_worker(struct global_cwq *gcwq)
628
if (unlikely(list_empty(&gcwq->idle_list)))
631
return list_first_entry(&gcwq->idle_list, struct worker, entry);
635
* wake_up_worker - wake up an idle worker
636
* @gcwq: gcwq to wake worker for
638
* Wake up the first idle worker of @gcwq.
641
* spin_lock_irq(gcwq->lock).
643
static void wake_up_worker(struct global_cwq *gcwq)
645
struct worker *worker = first_worker(gcwq);
648
wake_up_process(worker->task);
652
* wq_worker_waking_up - a worker is waking up
653
* @task: task waking up
654
* @cpu: CPU @task is waking up to
656
* This function is called during try_to_wake_up() when a worker is
660
* spin_lock_irq(rq->lock)
662
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
664
struct worker *worker = kthread_data(task);
666
if (!(worker->flags & WORKER_NOT_RUNNING))
667
atomic_inc(get_gcwq_nr_running(cpu));
671
* wq_worker_sleeping - a worker is going to sleep
672
* @task: task going to sleep
673
* @cpu: CPU in question, must be the current CPU number
675
* This function is called during schedule() when a busy worker is
676
* going to sleep. Worker on the same cpu can be woken up by
677
* returning pointer to its task.
680
* spin_lock_irq(rq->lock)
683
* Worker task on @cpu to wake up, %NULL if none.
685
struct task_struct *wq_worker_sleeping(struct task_struct *task,
688
struct worker *worker = kthread_data(task), *to_wakeup = NULL;
689
struct global_cwq *gcwq = get_gcwq(cpu);
690
atomic_t *nr_running = get_gcwq_nr_running(cpu);
692
if (worker->flags & WORKER_NOT_RUNNING)
695
/* this can only happen on the local cpu */
696
BUG_ON(cpu != raw_smp_processor_id());
699
* The counterpart of the following dec_and_test, implied mb,
700
* worklist not empty test sequence is in insert_work().
701
* Please read comment there.
703
* NOT_RUNNING is clear. This means that trustee is not in
704
* charge and we're running on the local cpu w/ rq lock held
705
* and preemption disabled, which in turn means that no one else
706
* could be manipulating idle_list, so dereferencing idle_list
707
* without gcwq lock is safe.
709
if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
710
to_wakeup = first_worker(gcwq);
711
return to_wakeup ? to_wakeup->task : NULL;
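/*
 * Illustrative aside (not kernel code): a standalone userspace sketch of
 * the nr_running bookkeeping used by wq_worker_sleeping() above.  A worker
 * going to sleep decrements the running count and, if it was the last
 * running worker while work is still pending, names the first idle worker
 * to wake.  Plain ints stand in for the atomic_t counter.
 */
#include <assert.h>
#include <stdbool.h>

struct sketch_gcwq {
	int nr_running;		/* stands in for gcwq_nr_running */
	bool worklist_empty;
	int first_idle_id;	/* -1 if no idle worker */
};

/* returns the id of the worker to wake, or -1 for none */
static int sketch_worker_sleeping(struct sketch_gcwq *gcwq)
{
	if (--gcwq->nr_running == 0 && !gcwq->worklist_empty)
		return gcwq->first_idle_id;
	return -1;
}

int main(void)
{
	struct sketch_gcwq gcwq = { .nr_running = 2, .worklist_empty = false,
				    .first_idle_id = 7 };

	/* one of two running workers blocks: someone still runs, no wakeup */
	assert(sketch_worker_sleeping(&gcwq) == -1);
	/* the last running worker blocks with work pending: wake worker 7 */
	assert(sketch_worker_sleeping(&gcwq) == 7);
	return 0;
}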
715
* worker_set_flags - set worker flags and adjust nr_running accordingly
717
* @flags: flags to set
718
* @wakeup: wakeup an idle worker if necessary
720
* Set @flags in @worker->flags and adjust nr_running accordingly. If
721
* nr_running becomes zero and @wakeup is %true, an idle worker is
725
* spin_lock_irq(gcwq->lock)
727
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
730
struct global_cwq *gcwq = worker->gcwq;
732
WARN_ON_ONCE(worker->task != current);
735
* If transitioning into NOT_RUNNING, adjust nr_running and
736
* wake up an idle worker as necessary if requested by
739
if ((flags & WORKER_NOT_RUNNING) &&
740
!(worker->flags & WORKER_NOT_RUNNING)) {
741
atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
744
if (atomic_dec_and_test(nr_running) &&
745
!list_empty(&gcwq->worklist))
746
wake_up_worker(gcwq);
748
atomic_dec(nr_running);
751
worker->flags |= flags;
755
* worker_clr_flags - clear worker flags and adjust nr_running accordingly
757
* @flags: flags to clear
759
* Clear @flags in @worker->flags and adjust nr_running accordingly.
762
* spin_lock_irq(gcwq->lock)
764
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
766
struct global_cwq *gcwq = worker->gcwq;
767
unsigned int oflags = worker->flags;
769
WARN_ON_ONCE(worker->task != current);
771
worker->flags &= ~flags;
774
* If transitioning out of NOT_RUNNING, increment nr_running. Note
775
* that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
776
* of multiple flags, not a single flag.
778
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
779
if (!(worker->flags & WORKER_NOT_RUNNING))
780
atomic_inc(get_gcwq_nr_running(gcwq->cpu));
784
* busy_worker_head - return the busy hash head for a work
785
* @gcwq: gcwq of interest
786
* @work: work to be hashed
788
* Return hash head of @gcwq for @work.
791
* spin_lock_irq(gcwq->lock).
794
* Pointer to the hash head.
796
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
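/*
 * Illustrative aside (not kernel code): a standalone userspace sketch of
 * the shift-and-fold hash above, using the same constants (64 buckets).
 * Work addresses are first shifted down by log2(sizeof(struct work_struct))
 * so that adjacent works spread across buckets; the size is taken as 32
 * here purely as an assumption so the example stands alone.
 */
#include <assert.h>

#define SKETCH_HASH_ORDER	6
#define SKETCH_HASH_SIZE	(1 << SKETCH_HASH_ORDER)
#define SKETCH_HASH_MASK	(SKETCH_HASH_SIZE - 1)
#define SKETCH_WORK_SHIFT	5	/* assumed ilog2(sizeof(struct work_struct)) */

static unsigned int sketch_busy_hash(unsigned long work_addr)
{
	unsigned long v = work_addr;

	v >>= SKETCH_WORK_SHIFT;	/* drop the always-zero low bits */
	v += v >> SKETCH_HASH_ORDER;	/* fold higher bits down */
	return v & SKETCH_HASH_MASK;	/* bucket index 0 .. 63 */
}

int main(void)
{
	/* two adjacent works land in different buckets rather than colliding */
	assert(sketch_busy_hash(0x1000) != sketch_busy_hash(0x1000 + 32));
	assert(sketch_busy_hash(0xdeadbeef) < SKETCH_HASH_SIZE);
	return 0;
}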
811
* __find_worker_executing_work - find worker which is executing a work
812
* @gcwq: gcwq of interest
813
* @bwh: hash head as returned by busy_worker_head()
814
* @work: work to find worker for
816
* Find a worker which is executing @work on @gcwq. @bwh should be
817
* the hash head obtained by calling busy_worker_head() with the same
821
* spin_lock_irq(gcwq->lock).
824
* Pointer to worker which is executing @work if found, NULL
827
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
828
struct hlist_head *bwh,
829
struct work_struct *work)
831
struct worker *worker;
832
struct hlist_node *tmp;
834
hlist_for_each_entry(worker, tmp, bwh, hentry)
835
if (worker->current_work == work)
841
* find_worker_executing_work - find worker which is executing a work
842
* @gcwq: gcwq of interest
843
* @work: work to find worker for
845
* Find a worker which is executing @work on @gcwq. This function is
846
* identical to __find_worker_executing_work() except that this
847
* function calculates @bwh itself.
850
* spin_lock_irq(gcwq->lock).
853
* Pointer to worker which is executing @work if found, NULL
856
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
857
struct work_struct *work)
859
return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
864
* gcwq_determine_ins_pos - find insertion position
865
* @gcwq: gcwq of interest
866
* @cwq: cwq a work is being queued for
868
* A work for @cwq is about to be queued on @gcwq, determine insertion
869
* position for the work. If @cwq is for HIGHPRI wq, the work is
870
* queued at the head of the queue but in FIFO order with respect to
871
* other HIGHPRI works; otherwise, at the end of the queue. This
872
* function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
873
* there are HIGHPRI works pending.
876
* spin_lock_irq(gcwq->lock).
879
* Pointer to insertion position.
881
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
882
struct cpu_workqueue_struct *cwq)
884
struct work_struct *twork;
886
if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
887
return &gcwq->worklist;
889
list_for_each_entry(twork, &gcwq->worklist, entry) {
890
struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
892
if (!(tcwq->wq->flags & WQ_HIGHPRI))
896
gcwq->flags |= GCWQ_HIGHPRI_PENDING;
897
return &twork->entry;
901
* insert_work - insert a work into gcwq
902
* @cwq: cwq @work belongs to
903
* @work: work to insert
904
* @head: insertion point
905
* @extra_flags: extra WORK_STRUCT_* flags to set
907
* Insert @work which belongs to @cwq into @gcwq after @head.
908
* @extra_flags is or'd to work_struct flags.
911
* spin_lock_irq(gcwq->lock).
128
913
static void insert_work(struct cpu_workqueue_struct *cwq,
129
struct work_struct *work, struct list_head *head)
914
struct work_struct *work, struct list_head *head,
915
unsigned int extra_flags)
131
trace_workqueue_insertion(cwq->thread, work);
133
set_wq_data(work, cwq);
917
struct global_cwq *gcwq = cwq->gcwq;
919
/* we own @work, set data and link */
920
set_work_cwq(work, cwq, extra_flags);
135
923
* Ensure that we get the right work->data if we see the
136
924
* result of list_add() below, see try_to_grab_pending().
139
928
list_add_tail(&work->entry, head);
140
wake_up(&cwq->more_work);
143
static void __queue_work(struct cpu_workqueue_struct *cwq,
931
* Ensure either worker_sched_deactivated() sees the above
932
* list_add_tail() or we see zero nr_running to avoid workers
933
* lying around lazily while there are works to be processed.
937
if (__need_more_worker(gcwq))
938
wake_up_worker(gcwq);
942
* Test whether @work is being queued from another work executing on the
943
* same workqueue. This is rather expensive and should only be used from
946
static bool is_chained_work(struct workqueue_struct *wq)
951
for_each_gcwq_cpu(cpu) {
952
struct global_cwq *gcwq = get_gcwq(cpu);
953
struct worker *worker;
954
struct hlist_node *pos;
957
spin_lock_irqsave(&gcwq->lock, flags);
958
for_each_busy_worker(worker, i, pos, gcwq) {
959
if (worker->task != current)
961
spin_unlock_irqrestore(&gcwq->lock, flags);
963
* I'm @worker, no locking necessary. See if @work
964
* is headed to the same workqueue.
966
return worker->current_cwq->wq == wq;
968
spin_unlock_irqrestore(&gcwq->lock, flags);
973
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
144
974
struct work_struct *work)
976
struct global_cwq *gcwq;
977
struct cpu_workqueue_struct *cwq;
978
struct list_head *worklist;
979
unsigned int work_flags;
146
980
unsigned long flags;
148
spin_lock_irqsave(&cwq->lock, flags);
149
insert_work(cwq, work, &cwq->worklist);
150
spin_unlock_irqrestore(&cwq->lock, flags);
982
debug_work_activate(work);
984
/* if dying, only works from the same workqueue are allowed */
985
if (unlikely(wq->flags & WQ_DYING) &&
986
WARN_ON_ONCE(!is_chained_work(wq)))
989
/* determine gcwq to use */
990
if (!(wq->flags & WQ_UNBOUND)) {
991
struct global_cwq *last_gcwq;
993
if (unlikely(cpu == WORK_CPU_UNBOUND))
994
cpu = raw_smp_processor_id();
997
* It's multi cpu. If @wq is non-reentrant and @work
998
* was previously on a different cpu, it might still
999
* be running there, in which case the work needs to
1000
* be queued on that cpu to guarantee non-reentrance.
1002
gcwq = get_gcwq(cpu);
1003
if (wq->flags & WQ_NON_REENTRANT &&
1004
(last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1005
struct worker *worker;
1007
spin_lock_irqsave(&last_gcwq->lock, flags);
1009
worker = find_worker_executing_work(last_gcwq, work);
1011
if (worker && worker->current_cwq->wq == wq)
1014
/* meh... not running there, queue here */
1015
spin_unlock_irqrestore(&last_gcwq->lock, flags);
1016
spin_lock_irqsave(&gcwq->lock, flags);
1019
spin_lock_irqsave(&gcwq->lock, flags);
1021
gcwq = get_gcwq(WORK_CPU_UNBOUND);
1022
spin_lock_irqsave(&gcwq->lock, flags);
1025
/* gcwq determined, get cwq and queue */
1026
cwq = get_cwq(gcwq->cpu, wq);
1027
trace_workqueue_queue_work(cpu, cwq, work);
1029
BUG_ON(!list_empty(&work->entry));
1031
cwq->nr_in_flight[cwq->work_color]++;
1032
work_flags = work_color_to_flags(cwq->work_color);
1034
if (likely(cwq->nr_active < cwq->max_active)) {
1035
trace_workqueue_activate_work(work);
1037
worklist = gcwq_determine_ins_pos(gcwq, cwq);
1039
work_flags |= WORK_STRUCT_DELAYED;
1040
worklist = &cwq->delayed_works;
1043
insert_work(cwq, work, worklist, work_flags);
1045
spin_unlock_irqrestore(&gcwq->lock, flags);
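/*
 * Illustrative aside (not kernel code): a standalone userspace sketch of
 * the max_active throttling used by __queue_work() above and unwound by
 * cwq_dec_nr_in_flight() further down.  Up to max_active works go straight
 * to the gcwq worklist; the rest park on delayed_works and are activated
 * one by one as running works retire.
 */
#include <assert.h>

struct sketch_cwq {
	int nr_active;
	int max_active;
	int nr_delayed;
};

static void sketch_queue(struct sketch_cwq *cwq)
{
	if (cwq->nr_active < cwq->max_active)
		cwq->nr_active++;		/* goes straight to the worklist */
	else
		cwq->nr_delayed++;		/* parked on delayed_works */
}

static void sketch_retire(struct sketch_cwq *cwq)
{
	cwq->nr_active--;
	if (cwq->nr_delayed && cwq->nr_active < cwq->max_active) {
		cwq->nr_delayed--;		/* cwq_activate_first_delayed() */
		cwq->nr_active++;
	}
}

int main(void)
{
	struct sketch_cwq cwq = { .max_active = 2 };
	int i;

	for (i = 0; i < 5; i++)
		sketch_queue(&cwq);
	assert(cwq.nr_active == 2 && cwq.nr_delayed == 3);

	sketch_retire(&cwq);	/* one finishes, one delayed work activates */
	assert(cwq.nr_active == 2 && cwq.nr_delayed == 2);
	return 0;
}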
262
1172
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
264
static void run_workqueue(struct cpu_workqueue_struct *cwq)
266
spin_lock_irq(&cwq->lock);
267
while (!list_empty(&cwq->worklist)) {
268
struct work_struct *work = list_entry(cwq->worklist.next,
269
struct work_struct, entry);
270
work_func_t f = work->func;
1175
* worker_enter_idle - enter idle state
1176
* @worker: worker which is entering idle state
1178
* @worker is entering idle state. Update stats and idle timer if
1182
* spin_lock_irq(gcwq->lock).
1184
static void worker_enter_idle(struct worker *worker)
1186
struct global_cwq *gcwq = worker->gcwq;
1188
BUG_ON(worker->flags & WORKER_IDLE);
1189
BUG_ON(!list_empty(&worker->entry) &&
1190
(worker->hentry.next || worker->hentry.pprev));
1192
/* can't use worker_set_flags(), also called from start_worker() */
1193
worker->flags |= WORKER_IDLE;
1195
worker->last_active = jiffies;
1197
/* idle_list is LIFO */
1198
list_add(&worker->entry, &gcwq->idle_list);
1200
if (likely(!(worker->flags & WORKER_ROGUE))) {
1201
if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1202
mod_timer(&gcwq->idle_timer,
1203
jiffies + IDLE_WORKER_TIMEOUT);
1205
wake_up_all(&gcwq->trustee_wait);
1207
/* sanity check nr_running */
1208
WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1209
atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1213
* worker_leave_idle - leave idle state
1214
* @worker: worker which is leaving idle state
1216
* @worker is leaving idle state. Update stats.
1219
* spin_lock_irq(gcwq->lock).
1221
static void worker_leave_idle(struct worker *worker)
1223
struct global_cwq *gcwq = worker->gcwq;
1225
BUG_ON(!(worker->flags & WORKER_IDLE));
1226
worker_clr_flags(worker, WORKER_IDLE);
1228
list_del_init(&worker->entry);
1232
* worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1235
* Works which are scheduled while the cpu is online must at least be
1236
* scheduled to a worker which is bound to the cpu so that if they are
1237
* flushed from cpu callbacks while cpu is going down, they are
1238
* guaranteed to execute on the cpu.
1240
* This function is to be used by rogue workers and rescuers to bind
1241
* themselves to the target cpu and may race with cpu going down or
1242
* coming online. kthread_bind() can't be used because it may put the
1243
* worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1244
* verbatim as it's best effort and blocking and gcwq may be
1245
* [dis]associated in the meantime.
1247
* This function tries set_cpus_allowed() and locks gcwq and verifies
1248
* the binding against GCWQ_DISASSOCIATED which is set during
1249
* CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1250
* idle state or fetches works without dropping lock, it can guarantee
1251
* the scheduling requirement described in the first paragraph.
1254
* Might sleep. Called without any lock but returns with gcwq->lock
1258
* %true if the associated gcwq is online (@worker is successfully
1259
* bound), %false if offline.
1261
static bool worker_maybe_bind_and_lock(struct worker *worker)
1262
__acquires(&gcwq->lock)
1264
struct global_cwq *gcwq = worker->gcwq;
1265
struct task_struct *task = worker->task;
1269
* The following call may fail, succeed or succeed
1270
* without actually migrating the task to the cpu if
1271
* it races with cpu hotunplug operation. Verify
1272
* against GCWQ_DISASSOCIATED.
1274
if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1275
set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1277
spin_lock_irq(&gcwq->lock);
1278
if (gcwq->flags & GCWQ_DISASSOCIATED)
1280
if (task_cpu(task) == gcwq->cpu &&
1281
cpumask_equal(&current->cpus_allowed,
1282
get_cpu_mask(gcwq->cpu)))
1284
spin_unlock_irq(&gcwq->lock);
1286
/* CPU has come up in between, retry migration */
1292
* Function for worker->rebind_work used to rebind rogue busy workers
1293
* to the associated cpu which is coming back online. This is
1294
* scheduled by cpu up but can race with other cpu hotplug operations
1295
* and may be executed twice without intervening cpu down.
1297
static void worker_rebind_fn(struct work_struct *work)
1299
struct worker *worker = container_of(work, struct worker, rebind_work);
1300
struct global_cwq *gcwq = worker->gcwq;
1302
if (worker_maybe_bind_and_lock(worker))
1303
worker_clr_flags(worker, WORKER_REBIND);
1305
spin_unlock_irq(&gcwq->lock);
1308
static struct worker *alloc_worker(void)
1310
struct worker *worker;
1312
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1314
INIT_LIST_HEAD(&worker->entry);
1315
INIT_LIST_HEAD(&worker->scheduled);
1316
INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1317
/* on creation a worker is in !idle && prep state */
1318
worker->flags = WORKER_PREP;
1324
* create_worker - create a new workqueue worker
1325
* @gcwq: gcwq the new worker will belong to
1326
* @bind: whether to set affinity to @cpu or not
1328
* Create a new worker which is bound to @gcwq. The returned worker
1329
* can be started by calling start_worker() or destroyed using
1333
* Might sleep. Does GFP_KERNEL allocations.
1336
* Pointer to the newly created worker.
1338
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1340
bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1341
struct worker *worker = NULL;
1344
spin_lock_irq(&gcwq->lock);
1345
while (ida_get_new(&gcwq->worker_ida, &id)) {
1346
spin_unlock_irq(&gcwq->lock);
1347
if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1349
spin_lock_irq(&gcwq->lock);
1351
spin_unlock_irq(&gcwq->lock);
1353
worker = alloc_worker();
1357
worker->gcwq = gcwq;
1360
if (!on_unbound_cpu)
1361
worker->task = kthread_create(worker_thread, worker,
1362
"kworker/%u:%d", gcwq->cpu, id);
1364
worker->task = kthread_create(worker_thread, worker,
1365
"kworker/u:%d", id);
1366
if (IS_ERR(worker->task))
1370
* A rogue worker will become a regular one if CPU comes
1371
* online later on. Make sure every worker has
1372
* PF_THREAD_BOUND set.
1374
if (bind && !on_unbound_cpu)
1375
kthread_bind(worker->task, gcwq->cpu);
1377
worker->task->flags |= PF_THREAD_BOUND;
1379
worker->flags |= WORKER_UNBOUND;
1385
spin_lock_irq(&gcwq->lock);
1386
ida_remove(&gcwq->worker_ida, id);
1387
spin_unlock_irq(&gcwq->lock);
1394
* start_worker - start a newly created worker
1395
* @worker: worker to start
1397
* Make the gcwq aware of @worker and start it.
1400
* spin_lock_irq(gcwq->lock).
1402
static void start_worker(struct worker *worker)
1404
worker->flags |= WORKER_STARTED;
1405
worker->gcwq->nr_workers++;
1406
worker_enter_idle(worker);
1407
wake_up_process(worker->task);
1411
* destroy_worker - destroy a workqueue worker
1412
* @worker: worker to be destroyed
1414
* Destroy @worker and adjust @gcwq stats accordingly.
1417
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
1419
static void destroy_worker(struct worker *worker)
1421
struct global_cwq *gcwq = worker->gcwq;
1422
int id = worker->id;
1424
/* sanity check frenzy */
1425
BUG_ON(worker->current_work);
1426
BUG_ON(!list_empty(&worker->scheduled));
1428
if (worker->flags & WORKER_STARTED)
1430
if (worker->flags & WORKER_IDLE)
1433
list_del_init(&worker->entry);
1434
worker->flags |= WORKER_DIE;
1436
spin_unlock_irq(&gcwq->lock);
1438
kthread_stop(worker->task);
1441
spin_lock_irq(&gcwq->lock);
1442
ida_remove(&gcwq->worker_ida, id);
1445
static void idle_worker_timeout(unsigned long __gcwq)
1447
struct global_cwq *gcwq = (void *)__gcwq;
1449
spin_lock_irq(&gcwq->lock);
1451
if (too_many_workers(gcwq)) {
1452
struct worker *worker;
1453
unsigned long expires;
1455
/* idle_list is kept in LIFO order, check the last one */
1456
worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1457
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1459
if (time_before(jiffies, expires))
1460
mod_timer(&gcwq->idle_timer, expires);
1462
/* it's been idle for too long, wake up manager */
1463
gcwq->flags |= GCWQ_MANAGE_WORKERS;
1464
wake_up_worker(gcwq);
1468
spin_unlock_irq(&gcwq->lock);
1471
static bool send_mayday(struct work_struct *work)
1473
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1474
struct workqueue_struct *wq = cwq->wq;
1477
if (!(wq->flags & WQ_RESCUER))
1480
/* mayday mayday mayday */
1481
cpu = cwq->gcwq->cpu;
1482
/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1483
if (cpu == WORK_CPU_UNBOUND)
1485
if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1486
wake_up_process(wq->rescuer->task);
1490
static void gcwq_mayday_timeout(unsigned long __gcwq)
1492
struct global_cwq *gcwq = (void *)__gcwq;
1493
struct work_struct *work;
1495
spin_lock_irq(&gcwq->lock);
1497
if (need_to_create_worker(gcwq)) {
1499
* We've been trying to create a new worker but
1500
* haven't been successful. We might be hitting an
1501
* allocation deadlock. Send distress signals to
1504
list_for_each_entry(work, &gcwq->worklist, entry)
1508
spin_unlock_irq(&gcwq->lock);
1510
mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1514
* maybe_create_worker - create a new worker if necessary
1515
* @gcwq: gcwq to create a new worker for
1517
* Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
1518
* have at least one idle worker on return from this function. If
1519
* creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1520
* sent to all rescuers with works scheduled on @gcwq to resolve
1521
* possible allocation deadlock.
1523
* On return, need_to_create_worker() is guaranteed to be false and
1524
* may_start_working() true.
1527
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
1528
* multiple times. Does GFP_KERNEL allocations. Called only from
1532
* false if no action was taken and gcwq->lock stayed locked, true
1535
static bool maybe_create_worker(struct global_cwq *gcwq)
1536
__releases(&gcwq->lock)
1537
__acquires(&gcwq->lock)
1539
if (!need_to_create_worker(gcwq))
1542
spin_unlock_irq(&gcwq->lock);
1544
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1545
mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1548
struct worker *worker;
1550
worker = create_worker(gcwq, true);
1552
del_timer_sync(&gcwq->mayday_timer);
1553
spin_lock_irq(&gcwq->lock);
1554
start_worker(worker);
1555
BUG_ON(need_to_create_worker(gcwq));
1559
if (!need_to_create_worker(gcwq))
1562
__set_current_state(TASK_INTERRUPTIBLE);
1563
schedule_timeout(CREATE_COOLDOWN);
1565
if (!need_to_create_worker(gcwq))
1569
del_timer_sync(&gcwq->mayday_timer);
1570
spin_lock_irq(&gcwq->lock);
1571
if (need_to_create_worker(gcwq))
1577
* maybe_destroy_worker - destroy workers which have been idle for a while
1578
* @gcwq: gcwq to destroy workers for
1580
* Destroy @gcwq workers which have been idle for longer than
1581
* IDLE_WORKER_TIMEOUT.
1584
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
1585
* multiple times. Called only from manager.
1588
* false if no action was taken and gcwq->lock stayed locked, true
1591
static bool maybe_destroy_workers(struct global_cwq *gcwq)
1595
while (too_many_workers(gcwq)) {
1596
struct worker *worker;
1597
unsigned long expires;
1599
worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1600
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1602
if (time_before(jiffies, expires)) {
1603
mod_timer(&gcwq->idle_timer, expires);
1607
destroy_worker(worker);
1615
* manage_workers - manage worker pool
1618
* Assume the manager role and manage gcwq worker pool @worker belongs
1619
* to. At any given time, there can be only zero or one manager per
1620
* gcwq. The exclusion is handled automatically by this function.
1622
* The caller can safely start processing works on false return. On
1623
* true return, it's guaranteed that need_to_create_worker() is false
1624
* and may_start_working() is true.
1627
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
1628
* multiple times. Does GFP_KERNEL allocations.
1631
* false if no action was taken and gcwq->lock stayed locked, true if
1632
* some action was taken.
1634
static bool manage_workers(struct worker *worker)
1636
struct global_cwq *gcwq = worker->gcwq;
1639
if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1642
gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1643
gcwq->flags |= GCWQ_MANAGING_WORKERS;
1646
* Destroy and then create so that may_start_working() is true
1649
ret |= maybe_destroy_workers(gcwq);
1650
ret |= maybe_create_worker(gcwq);
1652
gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1655
* The trustee might be waiting to take over the manager
1656
* position, tell it we're done.
1658
if (unlikely(gcwq->trustee))
1659
wake_up_all(&gcwq->trustee_wait);
1665
* move_linked_works - move linked works to a list
1666
* @work: start of series of works to be scheduled
1667
* @head: target list to append @work to
1668
* @nextp: out parameter for nested worklist walking
1670
* Schedule linked works starting from @work to @head. Work series to
1671
* be scheduled starts at @work and includes any consecutive work with
1672
* WORK_STRUCT_LINKED set in its predecessor.
1674
* If @nextp is not NULL, it's updated to point to the next work of
1675
* the last scheduled work. This allows move_linked_works() to be
1676
* nested inside outer list_for_each_entry_safe().
1679
* spin_lock_irq(gcwq->lock).
1681
static void move_linked_works(struct work_struct *work, struct list_head *head,
1682
struct work_struct **nextp)
1684
struct work_struct *n;
1687
* Linked worklist will always end before the end of the list,
1688
* use NULL for list head.
1690
list_for_each_entry_safe_from(work, n, NULL, entry) {
1691
list_move_tail(&work->entry, head);
1692
if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1697
* If we're already inside safe list traversal and have moved
1698
* multiple works to the scheduled queue, the next position
1699
* needs to be updated.
1705
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1707
struct work_struct *work = list_first_entry(&cwq->delayed_works,
1708
struct work_struct, entry);
1709
struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1711
trace_workqueue_activate_work(work);
1712
move_linked_works(work, pos, NULL);
1713
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1718
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1719
* @cwq: cwq of interest
1720
* @color: color of work which left the queue
1721
* @delayed: for a delayed work
1723
* A work either has completed or is removed from pending queue,
1724
* decrement nr_in_flight of its cwq and handle workqueue flushing.
1727
* spin_lock_irq(gcwq->lock).
1729
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1732
/* ignore uncolored works */
1733
if (color == WORK_NO_COLOR)
1736
cwq->nr_in_flight[color]--;
1740
if (!list_empty(&cwq->delayed_works)) {
1741
/* one down, submit a delayed one */
1742
if (cwq->nr_active < cwq->max_active)
1743
cwq_activate_first_delayed(cwq);
1747
/* is flush in progress and are we at the flushing tip? */
1748
if (likely(cwq->flush_color != color))
1751
/* are there still in-flight works? */
1752
if (cwq->nr_in_flight[color])
1755
/* this cwq is done, clear flush_color */
1756
cwq->flush_color = -1;
1759
* If this was the last cwq, wake up the first flusher. It
1760
* will handle the rest.
1762
if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1763
complete(&cwq->wq->first_flusher->done);
1767
* process_one_work - process single work
1769
* @work: work to process
1771
* Process @work. This function contains all the logics necessary to
1772
* process a single work including synchronization against and
1773
* interaction with other workers on the same cpu, queueing and
1774
* flushing. As long as context requirement is met, any worker can
1775
* call this function to process a work.
1778
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
1780
static void process_one_work(struct worker *worker, struct work_struct *work)
1781
__releases(&gcwq->lock)
1782
__acquires(&gcwq->lock)
1784
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1785
struct global_cwq *gcwq = cwq->gcwq;
1786
struct hlist_head *bwh = busy_worker_head(gcwq, work);
1787
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1788
work_func_t f = work->func;
1790
struct worker *collision;
271
1791
#ifdef CONFIG_LOCKDEP
273
* It is permissible to free the struct work_struct
274
* from inside the function that is called from it,
275
* this we need to take into account for lockdep too.
276
* To avoid bogus "held lock freed" warnings as well
277
* as problems when looking into work->lockdep_map,
278
* make a copy and use that here.
280
struct lockdep_map lockdep_map = work->lockdep_map;
1793
* It is permissible to free the struct work_struct from
1794
* inside the function that is called from it, this we need to
1795
* take into account for lockdep too. To avoid bogus "held
1796
* lock freed" warnings as well as problems when looking into
1797
* work->lockdep_map, make a copy and use that here.
1799
struct lockdep_map lockdep_map = work->lockdep_map;
282
trace_workqueue_execution(cwq->thread, work);
283
cwq->current_work = work;
284
list_del_init(cwq->worklist.next);
285
spin_unlock_irq(&cwq->lock);
287
BUG_ON(get_wq_data(work) != cwq);
288
work_clear_pending(work);
289
lock_map_acquire(&cwq->wq->lockdep_map);
290
lock_map_acquire(&lockdep_map);
292
lock_map_release(&lockdep_map);
293
lock_map_release(&cwq->wq->lockdep_map);
295
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
296
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
298
current->comm, preempt_count(),
299
task_pid_nr(current));
300
printk(KERN_ERR " last function: ");
301
print_symbol("%s\n", (unsigned long)f);
302
debug_show_held_locks(current);
1802
* A single work shouldn't be executed concurrently by
1803
* multiple workers on a single cpu. Check whether anyone is
1804
* already processing the work. If so, defer the work to the
1805
* currently executing one.
1807
collision = __find_worker_executing_work(gcwq, bwh, work);
1808
if (unlikely(collision)) {
1809
move_linked_works(work, &collision->scheduled, NULL);
1813
/* claim and process */
1814
debug_work_deactivate(work);
1815
hlist_add_head(&worker->hentry, bwh);
1816
worker->current_work = work;
1817
worker->current_cwq = cwq;
1818
work_color = get_work_color(work);
1820
/* record the current cpu number in the work data and dequeue */
1821
set_work_cpu(work, gcwq->cpu);
1822
list_del_init(&work->entry);
1825
* If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1826
* wake up another worker; otherwise, clear HIGHPRI_PENDING.
1828
if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1829
struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1830
struct work_struct, entry);
1832
if (!list_empty(&gcwq->worklist) &&
1833
get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1834
wake_up_worker(gcwq);
1836
gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1840
* CPU intensive works don't participate in concurrency
1841
* management. They're the scheduler's responsibility.
1843
if (unlikely(cpu_intensive))
1844
worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1846
spin_unlock_irq(&gcwq->lock);
1848
work_clear_pending(work);
1849
lock_map_acquire_read(&cwq->wq->lockdep_map);
1850
lock_map_acquire(&lockdep_map);
1851
trace_workqueue_execute_start(work);
1854
* While we must be careful to not use "work" after this, the trace
1855
* point will only record its address.
1857
trace_workqueue_execute_end(work);
1858
lock_map_release(&lockdep_map);
1859
lock_map_release(&cwq->wq->lockdep_map);
1861
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1862
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1864
current->comm, preempt_count(), task_pid_nr(current));
1865
printk(KERN_ERR " last function: ");
1866
print_symbol("%s\n", (unsigned long)f);
1867
debug_show_held_locks(current);
1871
spin_lock_irq(&gcwq->lock);
1873
/* clear cpu intensive status */
1874
if (unlikely(cpu_intensive))
1875
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1877
/* we're done with it, release */
1878
hlist_del_init(&worker->hentry);
1879
worker->current_work = NULL;
1880
worker->current_cwq = NULL;
1881
cwq_dec_nr_in_flight(cwq, work_color, false);
1885
* process_scheduled_works - process scheduled works
1888
* Process all scheduled works. Please note that the scheduled list
1889
* may change while processing a work, so this function repeatedly
1890
* fetches a work from the top and executes it.
1893
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
1896
static void process_scheduled_works(struct worker *worker)
1898
while (!list_empty(&worker->scheduled)) {
1899
struct work_struct *work = list_first_entry(&worker->scheduled,
1900
struct work_struct, entry);
1901
process_one_work(worker, work);
1906
* worker_thread - the worker thread function
1909
* The gcwq worker thread function. There's a single dynamic pool of
1910
* these per each cpu. These workers process all works regardless of
1911
* their specific target workqueue. The only exception is works which
1912
* belong to workqueues with a rescuer which will be explained in
1915
static int worker_thread(void *__worker)
1917
struct worker *worker = __worker;
1918
struct global_cwq *gcwq = worker->gcwq;
1920
/* tell the scheduler that this is a workqueue worker */
1921
worker->task->flags |= PF_WQ_WORKER;
1923
spin_lock_irq(&gcwq->lock);
1925
/* DIE can be set only while we're idle, checking here is enough */
1926
if (worker->flags & WORKER_DIE) {
1927
spin_unlock_irq(&gcwq->lock);
1928
worker->task->flags &= ~PF_WQ_WORKER;
1932
worker_leave_idle(worker);
1934
/* no more worker necessary? */
1935
if (!need_more_worker(gcwq))
1938
/* do we need to manage? */
1939
if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1943
* ->scheduled list can only be filled while a worker is
1944
* preparing to process a work or actually processing it.
1945
* Make sure nobody diddled with it while I was sleeping.
1947
BUG_ON(!list_empty(&worker->scheduled));
1950
* When control reaches this point, we're guaranteed to have
1951
* at least one idle worker or that someone else has already
1952
* assumed the manager role.
1954
worker_clr_flags(worker, WORKER_PREP);
1957
struct work_struct *work =
1958
list_first_entry(&gcwq->worklist,
1959
struct work_struct, entry);
1961
if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1962
/* optimization path, not strictly necessary */
1963
process_one_work(worker, work);
1964
if (unlikely(!list_empty(&worker->scheduled)))
1965
process_scheduled_works(worker);
1967
move_linked_works(work, &worker->scheduled, NULL);
1968
process_scheduled_works(worker);
306
spin_lock_irq(&cwq->lock);
307
cwq->current_work = NULL;
309
spin_unlock_irq(&cwq->lock);
1970
} while (keep_working(gcwq));
1972
worker_set_flags(worker, WORKER_PREP, false);
1974
if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1978
* gcwq->lock is held and there's no work to process and no
1979
* need to manage, sleep. Workers are woken up only while
1980
* holding gcwq->lock or from local cpu, so setting the
1981
* current state before releasing gcwq->lock is enough to
1982
* prevent losing any event.
1984
worker_enter_idle(worker);
1985
__set_current_state(TASK_INTERRUPTIBLE);
1986
spin_unlock_irq(&gcwq->lock);
312
static int worker_thread(void *__cwq)
1992
* rescuer_thread - the rescuer thread function
1993
* @__wq: the associated workqueue
1995
* Workqueue rescuer thread function. There's one rescuer for each
1996
* workqueue which has WQ_RESCUER set.
1998
* Regular work processing on a gcwq may block trying to create a new
1999
* worker which uses GFP_KERNEL allocation which has slight chance of
2000
* developing into deadlock if some works currently on the same queue
2001
* need to be processed to satisfy the GFP_KERNEL allocation. This is
2002
* the problem rescuer solves.
2004
* When such condition is possible, the gcwq summons rescuers of all
2005
* workqueues which have works queued on the gcwq and let them process
2006
* those works so that forward progress can be guaranteed.
2008
* This should happen rarely.
2010
static int rescuer_thread(void *__wq)
314
struct cpu_workqueue_struct *cwq = __cwq;
317
if (cwq->wq->freezeable)
321
prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
322
if (!freezing(current) &&
323
!kthread_should_stop() &&
324
list_empty(&cwq->worklist))
326
finish_wait(&cwq->more_work, &wait);
330
if (kthread_should_stop())
2012
struct workqueue_struct *wq = __wq;
2013
struct worker *rescuer = wq->rescuer;
2014
struct list_head *scheduled = &rescuer->scheduled;
2015
bool is_unbound = wq->flags & WQ_UNBOUND;
2018
set_user_nice(current, RESCUER_NICE_LEVEL);
2020
set_current_state(TASK_INTERRUPTIBLE);
2022
if (kthread_should_stop())
2026
* See whether any cpu is asking for help. Unbounded
2027
* workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2029
for_each_mayday_cpu(cpu, wq->mayday_mask) {
2030
unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2031
struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2032
struct global_cwq *gcwq = cwq->gcwq;
2033
struct work_struct *work, *n;
2035
__set_current_state(TASK_RUNNING);
2036
mayday_clear_cpu(cpu, wq->mayday_mask);
2038
/* migrate to the target cpu if possible */
2039
rescuer->gcwq = gcwq;
2040
worker_maybe_bind_and_lock(rescuer);
2043
* Slurp in all works issued via this workqueue and
2046
BUG_ON(!list_empty(&rescuer->scheduled));
2047
list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2048
if (get_work_cwq(work) == cwq)
2049
move_linked_works(work, scheduled, &n);
2051
process_scheduled_works(rescuer);
2054
* Leave this gcwq. If keep_working() is %true, notify a
2055
* regular worker; otherwise, we end up with 0 concurrency
2056
* and stalling the execution.
2058
if (keep_working(gcwq))
2059
wake_up_worker(gcwq);
2061
spin_unlock_irq(&gcwq->lock);
339
2068
struct wq_barrier {
388
2219
* We sleep until all works which were queued on entry have been handled,
389
2220
* but we are not livelocked by new incoming ones.
391
* This function used to run the workqueues itself. Now we just wait for the
392
* helper threads to do it.
394
2222
void flush_workqueue(struct workqueue_struct *wq)
396
const struct cpumask *cpu_map = wq_cpu_map(wq);
2224
struct wq_flusher this_flusher = {
2225
.list = LIST_HEAD_INIT(this_flusher.list),
2227
.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
400
2231
lock_map_acquire(&wq->lockdep_map);
401
2232
lock_map_release(&wq->lockdep_map);
402
for_each_cpu(cpu, cpu_map)
403
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
2234
mutex_lock(&wq->flush_mutex);
2237
* Start-to-wait phase
2239
next_color = work_next_color(wq->work_color);
2241
if (next_color != wq->flush_color) {
2243
* Color space is not full. The current work_color
2244
* becomes our flush_color and work_color is advanced
2247
BUG_ON(!list_empty(&wq->flusher_overflow));
2248
this_flusher.flush_color = wq->work_color;
2249
wq->work_color = next_color;
2251
if (!wq->first_flusher) {
2252
/* no flush in progress, become the first flusher */
2253
BUG_ON(wq->flush_color != this_flusher.flush_color);
2255
wq->first_flusher = &this_flusher;
2257
if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2259
/* nothing to flush, done */
2260
wq->flush_color = next_color;
2261
wq->first_flusher = NULL;
2266
BUG_ON(wq->flush_color == this_flusher.flush_color);
2267
list_add_tail(&this_flusher.list, &wq->flusher_queue);
2268
flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2272
* Oops, color space is full, wait on overflow queue.
2273
* The next flush completion will assign us
2274
* flush_color and transfer to flusher_queue.
2276
list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2279
mutex_unlock(&wq->flush_mutex);
2281
wait_for_completion(&this_flusher.done);
2284
* Wake-up-and-cascade phase
2286
* First flushers are responsible for cascading flushes and
2287
* handling overflow. Non-first flushers can simply return.
2289
if (wq->first_flusher != &this_flusher)
2292
mutex_lock(&wq->flush_mutex);
2294
/* we might have raced, check again with mutex held */
2295
if (wq->first_flusher != &this_flusher)
2298
wq->first_flusher = NULL;
2300
BUG_ON(!list_empty(&this_flusher.list));
2301
BUG_ON(wq->flush_color != this_flusher.flush_color);
2304
struct wq_flusher *next, *tmp;
2306
/* complete all the flushers sharing the current flush color */
2307
list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2308
if (next->flush_color != wq->flush_color)
2310
list_del_init(&next->list);
2311
complete(&next->done);
2314
BUG_ON(!list_empty(&wq->flusher_overflow) &&
2315
wq->flush_color != work_next_color(wq->work_color));
2317
/* this flush_color is finished, advance by one */
2318
wq->flush_color = work_next_color(wq->flush_color);
2320
/* one color has been freed, handle overflow queue */
2321
if (!list_empty(&wq->flusher_overflow)) {
2323
* Assign the same color to all overflowed
2324
* flushers, advance work_color and append to
2325
* flusher_queue. This is the start-to-wait
2326
* phase for these overflowed flushers.
2328
list_for_each_entry(tmp, &wq->flusher_overflow, list)
2329
tmp->flush_color = wq->work_color;
2331
wq->work_color = work_next_color(wq->work_color);
2333
list_splice_tail_init(&wq->flusher_overflow,
2334
&wq->flusher_queue);
2335
flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2338
if (list_empty(&wq->flusher_queue)) {
2339
BUG_ON(wq->flush_color != wq->work_color);
2344
* Need to flush more colors. Make the next flusher
2345
* the new first flusher and arm cwqs.
2347
BUG_ON(wq->flush_color == wq->work_color);
2348
BUG_ON(wq->flush_color != next->flush_color);
2350
list_del_init(&next->list);
2351
wq->first_flusher = next;
2353
if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2357
* Meh... this color is already done, clear first
2358
* flusher and repeat cascading.
2360
wq->first_flusher = NULL;
2364
mutex_unlock(&wq->flush_mutex);
405
2366
EXPORT_SYMBOL_GPL(flush_workqueue);
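/*
 * Illustrative sketch, not part of workqueue.c: the caller-side view of
 * the flush machinery above.  Work queued before flush_workqueue() is
 * called is guaranteed to have finished when it returns; work queued
 * afterwards is not waited for.  my_wq, my_work and my_work_fn() are
 * hypothetical.
 */
static void my_work_fn(struct work_struct *work)
{
	/* per-item processing */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_sync_point(struct workqueue_struct *my_wq)
{
	queue_work(my_wq, &my_work);
	flush_workqueue(my_wq);	/* waits for my_work and everything queued before it */
}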
408
* flush_work - block until a work_struct's callback has terminated
409
* @work: the work which is to be flushed
411
* Returns false if @work has already terminated.
413
* It is expected that, prior to calling flush_work(), the caller has
414
* arranged for the work to not be requeued, otherwise it doesn't make
415
* sense to use this function.
417
int flush_work(struct work_struct *work)
2368
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2369
bool wait_executing)
2371
struct worker *worker = NULL;
2372
struct global_cwq *gcwq;
419
2373
struct cpu_workqueue_struct *cwq;
420
struct list_head *prev;
421
struct wq_barrier barr;
424
cwq = get_wq_data(work);
428
lock_map_acquire(&cwq->wq->lockdep_map);
429
lock_map_release(&cwq->wq->lockdep_map);
432
spin_lock_irq(&cwq->lock);
2376
gcwq = get_work_gcwq(work);
2380
spin_lock_irq(&gcwq->lock);
433
2381
if (!list_empty(&work->entry)) {
435
2383
* See the comment near try_to_grab_pending()->smp_rmb().
436
* If it was re-queued under us we are not going to wait.
2384
* If it was re-queued to a different gcwq under us, we
2385
* are not going to wait.
439
if (unlikely(cwq != get_wq_data(work)))
443
if (cwq->current_work != work)
445
prev = &cwq->worklist;
447
insert_wq_barrier(cwq, &barr, prev->next);
449
spin_unlock_irq(&cwq->lock);
453
wait_for_completion(&barr.done);
2388
cwq = get_work_cwq(work);
2389
if (unlikely(!cwq || gcwq != cwq->gcwq))
2391
} else if (wait_executing) {
2392
worker = find_worker_executing_work(gcwq, work);
2395
cwq = worker->current_cwq;
2399
insert_wq_barrier(cwq, barr, work, worker);
2400
spin_unlock_irq(&gcwq->lock);
2403
* If @max_active is 1 or rescuer is in use, flushing another work
2404
* item on the same workqueue may lead to deadlock. Make sure the
2405
* flusher is not running on the same workqueue by verifying write
2408
if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2409
lock_map_acquire(&cwq->wq->lockdep_map);
2411
lock_map_acquire_read(&cwq->wq->lockdep_map);
2412
lock_map_release(&cwq->wq->lockdep_map);
2416
spin_unlock_irq(&gcwq->lock);
2421
* flush_work - wait for a work to finish executing the last queueing instance
2422
* @work: the work to flush
2424
* Wait until @work has finished execution. This function considers
2425
* only the last queueing instance of @work. If @work has been
2426
* enqueued across different CPUs on a non-reentrant workqueue or on
2427
* multiple workqueues, @work might still be executing on return on
2428
* some of the CPUs from earlier queueing.
2430
* If @work was queued only on a non-reentrant, ordered or unbound
2431
* workqueue, @work is guaranteed to be idle on return if it hasn't
2432
* been requeued since flush started.
2435
* %true if flush_work() waited for the work to finish execution,
2436
* %false if it was already idle.
2438
bool flush_work(struct work_struct *work)
2440
struct wq_barrier barr;
2442
if (start_flush_work(work, &barr, true)) {
2443
wait_for_completion(&barr.done);
2444
destroy_work_on_stack(&barr.work);
456
2449
EXPORT_SYMBOL_GPL(flush_work);
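/*
 * Illustrative sketch, not part of workqueue.c: flush_work() only waits
 * for the last queueing instance, so the usual pattern is to stop any
 * self-requeueing first and only then flush.  struct my_poller and
 * my_stop_polling() are hypothetical.
 */
struct my_poller {
	struct work_struct poll_work;
	bool stopping;		/* checked by the work function before requeueing */
};

static void my_stop_polling(struct my_poller *p)
{
	p->stopping = true;
	if (flush_work(&p->poll_work))
		pr_debug("waited for the final poll pass\n");
}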
2451
static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2453
struct wq_barrier barr;
2454
struct worker *worker;
2456
spin_lock_irq(&gcwq->lock);
2458
worker = find_worker_executing_work(gcwq, work);
2459
if (unlikely(worker))
2460
insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2462
spin_unlock_irq(&gcwq->lock);
2464
if (unlikely(worker)) {
2465
wait_for_completion(&barr.done);
2466
destroy_work_on_stack(&barr.work);
2472
static bool wait_on_work(struct work_struct *work)
2479
lock_map_acquire(&work->lockdep_map);
2480
lock_map_release(&work->lockdep_map);
2482
for_each_gcwq_cpu(cpu)
2483
ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2488
* flush_work_sync - wait until a work has finished execution
2489
* @work: the work to flush
2491
* Wait until @work has finished execution. On return, it's
2492
* guaranteed that all queueing instances of @work which happened
2493
* before this function is called are finished. In other words, if
2494
* @work hasn't been requeued since this function was called, @work is
2495
* guaranteed to be idle on return.
2498
* %true if flush_work_sync() waited for the work to finish execution,
2499
* %false if it was already idle.
2501
bool flush_work_sync(struct work_struct *work)
2503
struct wq_barrier barr;
2504
bool pending, waited;
2506
/* we'll wait for executions separately, queue barr only if pending */
2507
pending = start_flush_work(work, &barr, false);
2509
/* wait for executions to finish */
2510
waited = wait_on_work(work);
2512
/* wait for the pending one */
2514
wait_for_completion(&barr.done);
2515
destroy_work_on_stack(&barr.work);
2518
return pending || waited;
2520
EXPORT_SYMBOL_GPL(flush_work_sync);
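/*
 * Illustrative sketch, not part of workqueue.c: unlike flush_work(),
 * flush_work_sync() also waits for instances still running from earlier
 * queueings, which makes it the safer barrier before freeing the object
 * that embeds the work item (assuming nothing can requeue it anymore).
 * struct my_obj and my_obj_release() are hypothetical.
 */
struct my_obj {
	struct work_struct refresh_work;
};

static void my_obj_release(struct my_obj *obj)
{
	flush_work_sync(&obj->refresh_work);
	kfree(obj);
}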
459
2523
* Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
460
2524
* so this work can't be re-armed in any way.
462
2526
static int try_to_grab_pending(struct work_struct *work)
464
struct cpu_workqueue_struct *cwq;
2528
struct global_cwq *gcwq;
467
if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
2531
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
471
2535
* The queueing is in progress, or it is already queued. Try to
472
2536
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
475
cwq = get_wq_data(work);
2538
gcwq = get_work_gcwq(work);
479
spin_lock_irq(&cwq->lock);
2542
spin_lock_irq(&gcwq->lock);
480
2543
if (!list_empty(&work->entry)) {
482
* This work is queued, but perhaps we locked the wrong cwq.
2545
* This work is queued, but perhaps we locked the wrong gcwq.
483
2546
* In that case we must see the new value after rmb(), see
484
2547
* insert_work()->wmb().
487
if (cwq == get_wq_data(work)) {
2550
if (gcwq == get_work_gcwq(work)) {
2551
debug_work_deactivate(work);
488
2552
list_del_init(&work->entry);
2553
cwq_dec_nr_in_flight(get_work_cwq(work),
2554
get_work_color(work),
2555
*work_data_bits(work) & WORK_STRUCT_DELAYED);
492
spin_unlock_irq(&cwq->lock);
2559
spin_unlock_irq(&gcwq->lock);
497
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
498
struct work_struct *work)
500
struct wq_barrier barr;
503
spin_lock_irq(&cwq->lock);
504
if (unlikely(cwq->current_work == work)) {
505
insert_wq_barrier(cwq, &barr, cwq->worklist.next);
508
spin_unlock_irq(&cwq->lock);
510
if (unlikely(running))
511
wait_for_completion(&barr.done);
514
static void wait_on_work(struct work_struct *work)
516
struct cpu_workqueue_struct *cwq;
517
struct workqueue_struct *wq;
518
const struct cpumask *cpu_map;
523
lock_map_acquire(&work->lockdep_map);
524
lock_map_release(&work->lockdep_map);
526
cwq = get_wq_data(work);
531
cpu_map = wq_cpu_map(wq);
533
for_each_cpu(cpu, cpu_map)
534
wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
537
static int __cancel_work_timer(struct work_struct *work,
2564
static bool __cancel_work_timer(struct work_struct *work,
538
2565
struct timer_list* timer)
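/*
 * Illustrative sketch, not part of workqueue.c: __cancel_work_timer()
 * above backs the exported cancel helpers, and its grab-or-wait loop is
 * what makes cancellation safe even for a work item that requeues
 * itself.  struct my_dev and my_dev_stop() are hypothetical.
 */
struct my_dev {
	struct delayed_work heartbeat;	/* requeues itself from its work function */
};

static void my_dev_stop(struct my_dev *dev)
{
	/* returns only after the work is idle and its PENDING bit is clear */
	cancel_delayed_work_sync(&dev->heartbeat);
}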
755
2815
int keventd_up(void)
757
return keventd_wq != NULL;
760
int current_is_keventd(void)
762
struct cpu_workqueue_struct *cwq;
763
int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
768
cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
769
if (current == cwq->thread)
776
static struct cpu_workqueue_struct *
777
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
779
struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
782
spin_lock_init(&cwq->lock);
783
INIT_LIST_HEAD(&cwq->worklist);
784
init_waitqueue_head(&cwq->more_work);
789
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
791
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
792
struct workqueue_struct *wq = cwq->wq;
793
const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
794
struct task_struct *p;
796
p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
2817
return system_wq != NULL;
2820
static int alloc_cwqs(struct workqueue_struct *wq)
798
* Nobody can add the work_struct to this cwq,
799
* if (caller is __create_workqueue)
800
* nobody should see this wq
801
* else // caller is CPU_UP_PREPARE
802
* cpu is not on cpu_online_map
803
* so we can abort safely.
808
sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
811
trace_workqueue_creation(cwq->thread, cpu);
816
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
818
struct task_struct *p = cwq->thread;
822
kthread_bind(p, cpu);
827
struct workqueue_struct *__create_workqueue_key(const char *name,
831
struct lock_class_key *key,
832
const char *lock_name)
2823
* cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
2824
* Make sure that the alignment isn't lower than that of
2825
* unsigned long long.
2827
const size_t size = sizeof(struct cpu_workqueue_struct);
2828
const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2829
__alignof__(unsigned long long));
2831
bool percpu = !(wq->flags & WQ_UNBOUND);
2833
bool percpu = false;
2837
wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2842
* Allocate enough room to align cwq and put an extra
2843
* pointer at the end pointing back to the originally
2844
* allocated pointer which will be used for free.
2846
ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2848
wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2849
*(void **)(wq->cpu_wq.single + 1) = ptr;
2853
/* just in case, make sure it's actually aligned
2854
* - this is affected by PERCPU() alignment in vmlinux.lds.S
2856
BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2857
return wq->cpu_wq.v ? 0 : -ENOMEM;
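/*
 * Illustrative sketch, not part of workqueue.c: the same over-allocate,
 * align and stash-the-original-pointer trick that alloc_cwqs() uses
 * above, shown for a made-up struct foo.  @align must be a power of
 * two; PTR_ALIGN() and kzalloc()/kfree() come from <linux/kernel.h> and
 * <linux/slab.h>.
 */
struct foo {
	unsigned long data;
};

static struct foo *alloc_aligned_foo(size_t align)
{
	void *ptr;
	struct foo *f;

	/* room for the object, worst-case alignment slack and a back pointer */
	ptr = kzalloc(sizeof(*f) + align + sizeof(void *), GFP_KERNEL);
	if (!ptr)
		return NULL;

	f = PTR_ALIGN(ptr, align);
	*(void **)(f + 1) = ptr;	/* remember what to hand back to kfree() */
	return f;
}

static void free_aligned_foo(struct foo *f)
{
	if (f)
		kfree(*(void **)(f + 1));
}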
2860
static void free_cwqs(struct workqueue_struct *wq)
2863
bool percpu = !(wq->flags & WQ_UNBOUND);
2865
bool percpu = false;
2869
free_percpu(wq->cpu_wq.pcpu);
2870
else if (wq->cpu_wq.single) {
2871
/* the pointer to free is stored right after the cwq */
2872
kfree(*(void **)(wq->cpu_wq.single + 1));
2876
static int wq_clamp_max_active(int max_active, unsigned int flags,
2879
int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2881
if (max_active < 1 || max_active > lim)
2882
printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2883
"is out of range, clamping between %d and %d\n",
2884
max_active, name, 1, lim);
2886
return clamp_val(max_active, 1, lim);
2889
struct workqueue_struct *__alloc_workqueue_key(const char *name,
2892
struct lock_class_key *key,
2893
const char *lock_name)
834
2895
struct workqueue_struct *wq;
835
struct cpu_workqueue_struct *cwq;
2899
* Workqueues which may be used during memory reclaim should
2900
* have a rescuer to guarantee forward progress.
2902
if (flags & WQ_MEM_RECLAIM)
2903
flags |= WQ_RESCUER;
2906
* Unbound workqueues aren't concurrency managed and should be
2907
* dispatched to workers immediately.
2909
if (flags & WQ_UNBOUND)
2910
flags |= WQ_HIGHPRI;
2912
max_active = max_active ?: WQ_DFL_ACTIVE;
2913
max_active = wq_clamp_max_active(max_active, flags, name);
838
2915
wq = kzalloc(sizeof(*wq), GFP_KERNEL);
842
wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
2920
wq->saved_max_active = max_active;
2921
mutex_init(&wq->flush_mutex);
2922
atomic_set(&wq->nr_cwqs_to_flush, 0);
2923
INIT_LIST_HEAD(&wq->flusher_queue);
2924
INIT_LIST_HEAD(&wq->flusher_overflow);
848
2926
wq->name = name;
849
2927
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
850
wq->singlethread = singlethread;
851
wq->freezeable = freezeable;
853
2928
INIT_LIST_HEAD(&wq->list);
856
cwq = init_cpu_workqueue(wq, singlethread_cpu);
857
err = create_workqueue_thread(cwq, singlethread_cpu);
858
start_workqueue_thread(cwq, -1);
860
cpu_maps_update_begin();
862
* We must place this wq on list even if the code below fails.
863
* cpu_down(cpu) can remove cpu from cpu_populated_map before
864
* destroy_workqueue() takes the lock, in that case we leak
867
spin_lock(&workqueue_lock);
868
list_add(&wq->list, &workqueues);
869
spin_unlock(&workqueue_lock);
871
* We must initialize cwqs for each possible cpu even if we
872
* are going to call destroy_workqueue() finally. Otherwise
873
* cpu_up() can hit the uninitialized cwq once we drop the
876
for_each_possible_cpu(cpu) {
877
cwq = init_cpu_workqueue(wq, cpu);
878
if (err || !cpu_online(cpu))
880
err = create_workqueue_thread(cwq, cpu);
881
start_workqueue_thread(cwq, cpu);
883
cpu_maps_update_done();
887
destroy_workqueue(wq);
2930
if (alloc_cwqs(wq) < 0)
2933
for_each_cwq_cpu(cpu, wq) {
2934
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2935
struct global_cwq *gcwq = get_gcwq(cpu);
2937
BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
2940
cwq->flush_color = -1;
2941
cwq->max_active = max_active;
2942
INIT_LIST_HEAD(&cwq->delayed_works);
2945
if (flags & WQ_RESCUER) {
2946
struct worker *rescuer;
2948
if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2951
wq->rescuer = rescuer = alloc_worker();
2955
rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2956
if (IS_ERR(rescuer->task))
2959
rescuer->task->flags |= PF_THREAD_BOUND;
2960
wake_up_process(rescuer->task);
2964
* workqueue_lock protects global freeze state and workqueues
2965
* list. Grab it, set max_active accordingly and add the new
2966
* workqueue to workqueues list.
2968
spin_lock(&workqueue_lock);
2970
if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
2971
for_each_cwq_cpu(cpu, wq)
2972
get_cwq(cpu, wq)->max_active = 0;
2974
list_add(&wq->list, &workqueues);
2976
spin_unlock(&workqueue_lock);
892
EXPORT_SYMBOL_GPL(__create_workqueue_key);
894
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
897
* Our caller is either destroy_workqueue() or CPU_POST_DEAD,
898
* cpu_add_remove_lock protects cwq->thread.
900
if (cwq->thread == NULL)
903
lock_map_acquire(&cwq->wq->lockdep_map);
904
lock_map_release(&cwq->wq->lockdep_map);
906
flush_cpu_workqueue(cwq);
908
* If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
909
* a concurrent flush_workqueue() can insert a barrier after us.
910
* However, in that case run_workqueue() won't return and check
911
* kthread_should_stop() until it flushes all work_struct's.
912
* When ->worklist becomes empty it is safe to exit because no
913
* more work_structs can be queued on this cwq: flush_workqueue
914
* checks list_empty(), and a "normal" queue_work() can't use
917
trace_workqueue_destruction(cwq->thread);
918
kthread_stop(cwq->thread);
2982
free_mayday_mask(wq->mayday_mask);
2988
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
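/*
 * Illustrative sketch, not part of workqueue.c: the flag fix-ups and the
 * max_active default applied above, seen from the caller's side through
 * the alloc_workqueue() wrapper.  The workqueue names and pointers are
 * hypothetical.
 */
static struct workqueue_struct *my_unbound_wq;
static struct workqueue_struct *my_ordered_wq;

static int __init my_wqs_init(void)
{
	/* max_active == 0 picks WQ_DFL_ACTIVE; WQ_UNBOUND is also marked WQ_HIGHPRI above */
	my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);

	/* a per-cpu queue with max_active == 1 runs at most one item per cpu at a time */
	my_ordered_wq = alloc_workqueue("my_ordered", 0, 1);

	if (!my_unbound_wq || !my_ordered_wq) {
		if (my_unbound_wq)
			destroy_workqueue(my_unbound_wq);
		if (my_ordered_wq)
			destroy_workqueue(my_ordered_wq);
		return -ENOMEM;
	}
	return 0;
}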
923
2991
* destroy_workqueue - safely terminate a workqueue
928
2996
void destroy_workqueue(struct workqueue_struct *wq)
930
const struct cpumask *cpu_map = wq_cpu_map(wq);
933
cpu_maps_update_begin();
2998
unsigned int flush_cnt = 0;
3002
* Mark @wq dying and drain all pending works. Once WQ_DYING is
3003
* set, only chain queueing is allowed. IOW, only currently
3004
* pending or running work items on @wq can queue further work
3005
* items on it. @wq is flushed repeatedly until it becomes empty.
3006
* The number of flushes is determined by the depth of chaining and
3007
* should be relatively short. Whine if it takes too long.
3009
wq->flags |= WQ_DYING;
3011
flush_workqueue(wq);
3013
for_each_cwq_cpu(cpu, wq) {
3014
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3016
if (!cwq->nr_active && list_empty(&cwq->delayed_works))
3019
if (++flush_cnt == 10 ||
3020
(flush_cnt % 100 == 0 && flush_cnt <= 1000))
3021
printk(KERN_WARNING "workqueue %s: flush on "
3022
"destruction isn't complete after %u tries\n",
3023
wq->name, flush_cnt);
3028
* wq list is used to freeze wq, remove from list after
3029
* flushing is complete in case freeze races us.
934
3031
spin_lock(&workqueue_lock);
935
3032
list_del(&wq->list);
936
3033
spin_unlock(&workqueue_lock);
938
for_each_cpu(cpu, cpu_map)
939
cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
940
cpu_maps_update_done();
942
free_percpu(wq->cpu_wq);
3036
for_each_cwq_cpu(cpu, wq) {
3037
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3040
for (i = 0; i < WORK_NR_COLORS; i++)
3041
BUG_ON(cwq->nr_in_flight[i]);
3042
BUG_ON(cwq->nr_active);
3043
BUG_ON(!list_empty(&cwq->delayed_works));
3046
if (wq->flags & WQ_RESCUER) {
3047
kthread_stop(wq->rescuer->task);
3048
free_mayday_mask(wq->mayday_mask);
945
3055
EXPORT_SYMBOL_GPL(destroy_workqueue);
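/*
 * Illustrative sketch, not part of workqueue.c: destroy_workqueue()
 * drains whatever is still chained on the queue, but the caller must cut
 * off all external sources of new work first.  my_wq and my_poll are
 * hypothetical.
 */
static struct workqueue_struct *my_wq;
static struct delayed_work my_poll;

static void __exit my_driver_exit(void)
{
	/* stop the only source of new work items ... */
	cancel_delayed_work_sync(&my_poll);
	/* ... then drain the chain and free the workqueue */
	destroy_workqueue(my_wq);
}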
3058
* workqueue_set_max_active - adjust max_active of a workqueue
3059
* @wq: target workqueue
3060
* @max_active: new max_active value.
3062
* Set max_active of @wq to @max_active.
3065
* Don't call from IRQ context.
3067
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3071
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3073
spin_lock(&workqueue_lock);
3075
wq->saved_max_active = max_active;
3077
for_each_cwq_cpu(cpu, wq) {
3078
struct global_cwq *gcwq = get_gcwq(cpu);
3080
spin_lock_irq(&gcwq->lock);
3082
if (!(wq->flags & WQ_FREEZABLE) ||
3083
!(gcwq->flags & GCWQ_FREEZING))
3084
get_cwq(gcwq->cpu, wq)->max_active = max_active;
3086
spin_unlock_irq(&gcwq->lock);
3089
spin_unlock(&workqueue_lock);
3091
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
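/*
 * Illustrative sketch, not part of workqueue.c: adjusting concurrency at
 * runtime, e.g. from a module parameter or a sysfs store handler.
 * my_set_concurrency() and its caller are hypothetical.
 */
static int my_set_concurrency(struct workqueue_struct *wq, int level)
{
	if (level < 1)
		return -EINVAL;
	/* clamped to the valid range by wq_clamp_max_active() above */
	workqueue_set_max_active(wq, level);
	return 0;
}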
3094
* workqueue_congested - test whether a workqueue is congested
3095
* @cpu: CPU in question
3096
* @wq: target workqueue
3098
* Test whether @wq's cpu workqueue for @cpu is congested. There is
3099
* no synchronization around this function and the test result is
3100
* unreliable and only useful as advisory hints or for debugging.
3103
* %true if congested, %false otherwise.
3105
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3107
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3109
return !list_empty(&cwq->delayed_works);
3111
EXPORT_SYMBOL_GPL(workqueue_congested);
3114
* work_cpu - return the last known associated cpu for @work
3115
* @work: the work of interest
3118
* CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
3120
unsigned int work_cpu(struct work_struct *work)
3122
struct global_cwq *gcwq = get_work_gcwq(work);
3124
return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3126
EXPORT_SYMBOL_GPL(work_cpu);
3129
* work_busy - test whether a work is currently pending or running
3130
* @work: the work to be tested
3132
* Test whether @work is currently pending or running. There is no
3133
* synchronization around this function and the test result is
3134
* unreliable and only useful as advisory hints or for debugging.
3135
* Especially for reentrant wqs, the pending state might hide the
3139
* OR'd bitmask of WORK_BUSY_* bits.
3141
unsigned int work_busy(struct work_struct *work)
3143
struct global_cwq *gcwq = get_work_gcwq(work);
3144
unsigned long flags;
3145
unsigned int ret = 0;
3150
spin_lock_irqsave(&gcwq->lock, flags);
3152
if (work_pending(work))
3153
ret |= WORK_BUSY_PENDING;
3154
if (find_worker_executing_work(gcwq, work))
3155
ret |= WORK_BUSY_RUNNING;
3157
spin_unlock_irqrestore(&gcwq->lock, flags);
3161
EXPORT_SYMBOL_GPL(work_busy);
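/*
 * Illustrative sketch, not part of workqueue.c: both helpers above are
 * advisory, so their results should only steer heuristics and never be
 * relied on for correctness.  my_should_throttle() is hypothetical.
 */
static bool my_should_throttle(struct workqueue_struct *wq,
			       struct work_struct *work)
{
	/* back off if this cpu's cwq already has delayed works backed up */
	if (workqueue_congested(raw_smp_processor_id(), wq))
		return true;

	/* or if the previous instance of the work is still running somewhere */
	return work_busy(work) & WORK_BUSY_RUNNING;
}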
3166
* There are two challenges in supporting CPU hotplug. Firstly, there
3167
* are a lot of assumptions on strong associations among work, cwq and
3168
* gcwq which make migrating pending and scheduled works very
3169
* difficult to implement without impacting hot paths. Secondly,
3170
* gcwqs serve a mix of short, long and very long running works, making
3171
* blocked draining impractical.
3173
* This is solved by allowing a gcwq to be detached from CPU, running
3174
* it with unbound (rogue) workers and allowing it to be reattached
3175
* later if the cpu comes back online. A separate thread is created
3176
* to govern a gcwq in such state and is called the trustee of the
3179
* Trustee states and their descriptions.
3181
* START Command state used on startup. On CPU_DOWN_PREPARE, a
3182
* new trustee is started with this state.
3184
* IN_CHARGE Once started, trustee will enter this state after
3185
* assuming the manager role and making all existing
3186
* workers rogue. DOWN_PREPARE waits for trustee to
3187
* enter this state. After reaching IN_CHARGE, trustee
3188
* tries to execute the pending worklist until it's empty
3189
* and the state is set to BUTCHER, or the state is set
3192
* BUTCHER Command state which is set by the cpu callback after
3193
* the cpu has gone down. Once this state is set, the trustee
3194
* knows that there will be no new works on the worklist
3195
* and once the worklist is empty it can proceed to
3196
* killing idle workers.
3198
* RELEASE Command state which is set by the cpu callback if the
3199
* cpu down has been canceled or it has come online
3200
* again. After recognizing this state, trustee stops
3201
* trying to drain or butcher and clears ROGUE, rebinds
3202
* all remaining workers back to the cpu and releases
3205
* DONE Trustee will enter this state after BUTCHER or RELEASE
3208
* trustee CPU draining
3209
* took over down complete
3210
* START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3212
* | CPU is back online v return workers |
3213
* ----------------> RELEASE --------------
3217
* trustee_wait_event_timeout - timed event wait for trustee
3218
* @cond: condition to wait for
3219
* @timeout: timeout in jiffies
3221
* wait_event_timeout() for trustee to use. Handles locking and
3222
* checks for RELEASE request.
3225
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
3226
* multiple times. To be used by trustee.
3229
* Positive indicating the remaining time if @cond is satisfied, 0 if timed
3230
* out, -1 if canceled.
3232
#define trustee_wait_event_timeout(cond, timeout) ({ \
3233
long __ret = (timeout); \
3234
while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3236
spin_unlock_irq(&gcwq->lock); \
3237
__wait_event_timeout(gcwq->trustee_wait, (cond) || \
3238
(gcwq->trustee_state == TRUSTEE_RELEASE), \
3240
spin_lock_irq(&gcwq->lock); \
3242
gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
3246
* trustee_wait_event - event wait for trustee
3247
* @cond: condition to wait for
3249
* wait_event() for trustee to use. Automatically handles locking and
3250
* checks for CANCEL request.
3253
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
3254
* multiple times. To be used by trustee.
3257
* 0 if @cond is satisfied, -1 if canceled.
3259
#define trustee_wait_event(cond) ({ \
3261
__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3262
__ret1 < 0 ? -1 : 0; \
3265
static int __cpuinit trustee_thread(void *__gcwq)
3267
struct global_cwq *gcwq = __gcwq;
3268
struct worker *worker;
3269
struct work_struct *work;
3270
struct hlist_node *pos;
3274
BUG_ON(gcwq->cpu != smp_processor_id());
3276
spin_lock_irq(&gcwq->lock);
3278
* Claim the manager position and make all workers rogue.
3279
* Trustee must be bound to the target cpu and can't be
3282
BUG_ON(gcwq->cpu != smp_processor_id());
3283
rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3286
gcwq->flags |= GCWQ_MANAGING_WORKERS;
3288
list_for_each_entry(worker, &gcwq->idle_list, entry)
3289
worker->flags |= WORKER_ROGUE;
3291
for_each_busy_worker(worker, i, pos, gcwq)
3292
worker->flags |= WORKER_ROGUE;
3295
* Call schedule() so that we cross rq->lock and thus can
3296
* guarantee sched callbacks see the rogue flag. This is
3297
* necessary as scheduler callbacks may be invoked from other
3300
spin_unlock_irq(&gcwq->lock);
3302
spin_lock_irq(&gcwq->lock);
3305
* Sched callbacks are disabled now. Zap nr_running. After
3306
* this, nr_running stays zero and need_more_worker() and
3307
* keep_working() are always true as long as the worklist is
3310
atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3312
spin_unlock_irq(&gcwq->lock);
3313
del_timer_sync(&gcwq->idle_timer);
3314
spin_lock_irq(&gcwq->lock);
3317
* We're now in charge. Notify and proceed to drain. We need
3318
* to keep the gcwq running during the whole CPU down
3319
* procedure as other cpu hotunplug callbacks may need to
3320
* flush currently running tasks.
3322
gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3323
wake_up_all(&gcwq->trustee_wait);
3326
* The original cpu is in the process of dying and may go away
3327
* anytime now. When that happens, we and all workers would
3328
* be migrated to other cpus. Try draining any remaining work. We
3329
* want to get it over with ASAP - spam rescuers, wake up as
3330
* many idlers as necessary and create new ones till the
3331
* worklist is empty. Note that if the gcwq is frozen, there
3332
* may be frozen works in freezable cwqs. Don't declare
3333
* completion while frozen.
3335
while (gcwq->nr_workers != gcwq->nr_idle ||
3336
gcwq->flags & GCWQ_FREEZING ||
3337
gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3340
list_for_each_entry(work, &gcwq->worklist, entry) {
3345
list_for_each_entry(worker, &gcwq->idle_list, entry) {
3348
wake_up_process(worker->task);
3351
if (need_to_create_worker(gcwq)) {
3352
spin_unlock_irq(&gcwq->lock);
3353
worker = create_worker(gcwq, false);
3354
spin_lock_irq(&gcwq->lock);
3356
worker->flags |= WORKER_ROGUE;
3357
start_worker(worker);
3361
/* give a breather */
3362
if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3367
* Either all works have been scheduled and cpu is down, or
3368
* cpu down has already been canceled. Wait for and butcher
3369
* all workers till we're canceled.
3372
rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3373
while (!list_empty(&gcwq->idle_list))
3374
destroy_worker(list_first_entry(&gcwq->idle_list,
3375
struct worker, entry));
3376
} while (gcwq->nr_workers && rc >= 0);
3379
* At this point, either draining has completed and no worker
3380
* is left, or cpu down has been canceled or the cpu is being
3381
* brought back up. There shouldn't be any idle one left.
3382
* Tell the remaining busy ones to rebind once they finish their
3383
* currently scheduled works by scheduling the rebind_work.
3385
WARN_ON(!list_empty(&gcwq->idle_list));
3387
for_each_busy_worker(worker, i, pos, gcwq) {
3388
struct work_struct *rebind_work = &worker->rebind_work;
3391
* Rebind_work may race with future cpu hotplug
3392
* operations. Use a separate flag to mark that
3393
* rebinding is scheduled.
3395
worker->flags |= WORKER_REBIND;
3396
worker->flags &= ~WORKER_ROGUE;
3398
/* queue rebind_work, wq doesn't matter, use the default one */
3399
if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3400
work_data_bits(rebind_work)))
3403
debug_work_activate(rebind_work);
3404
insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3405
worker->scheduled.next,
3406
work_color_to_flags(WORK_NO_COLOR));
3409
/* relinquish manager role */
3410
gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3412
/* notify completion */
3413
gcwq->trustee = NULL;
3414
gcwq->trustee_state = TRUSTEE_DONE;
3415
wake_up_all(&gcwq->trustee_wait);
3416
spin_unlock_irq(&gcwq->lock);
3421
* wait_trustee_state - wait for trustee to enter the specified state
3422
* @gcwq: gcwq the trustee of interest belongs to
3423
* @state: target state to wait for
3425
* Wait for the trustee to reach @state. DONE is already matched.
3428
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
3429
* multiple times. To be used by cpu_callback.
3431
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3432
__releases(&gcwq->lock)
3433
__acquires(&gcwq->lock)
3435
if (!(gcwq->trustee_state == state ||
3436
gcwq->trustee_state == TRUSTEE_DONE)) {
3437
spin_unlock_irq(&gcwq->lock);
3438
__wait_event(gcwq->trustee_wait,
3439
gcwq->trustee_state == state ||
3440
gcwq->trustee_state == TRUSTEE_DONE);
3441
spin_lock_irq(&gcwq->lock);
947
3445
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
948
3446
unsigned long action,
951
3449
unsigned int cpu = (unsigned long)hcpu;
952
struct cpu_workqueue_struct *cwq;
953
struct workqueue_struct *wq;
3450
struct global_cwq *gcwq = get_gcwq(cpu);
3451
struct task_struct *new_trustee = NULL;
3452
struct worker *uninitialized_var(new_worker);
3453
unsigned long flags;
956
3455
action &= ~CPU_TASKS_FROZEN;
958
3457
switch (action) {
3458
case CPU_DOWN_PREPARE:
3459
new_trustee = kthread_create(trustee_thread, gcwq,
3460
"workqueue_trustee/%d\n", cpu);
3461
if (IS_ERR(new_trustee))
3462
return notifier_from_errno(PTR_ERR(new_trustee));
3463
kthread_bind(new_trustee, cpu);
959
3465
case CPU_UP_PREPARE:
960
cpumask_set_cpu(cpu, cpu_populated_map);
963
list_for_each_entry(wq, &workqueues, list) {
964
cwq = per_cpu_ptr(wq->cpu_wq, cpu);
968
if (!create_workqueue_thread(cwq, cpu))
970
printk(KERN_ERR "workqueue [%s] for %i failed\n",
972
action = CPU_UP_CANCELED;
977
start_workqueue_thread(cwq, cpu);
980
case CPU_UP_CANCELED:
981
start_workqueue_thread(cwq, -1);
983
cleanup_workqueue_thread(cwq);
3466
BUG_ON(gcwq->first_idle);
3467
new_worker = create_worker(gcwq, false);
3470
kthread_stop(new_trustee);
3475
/* some are called w/ irq disabled, don't disturb irq status */
3476
spin_lock_irqsave(&gcwq->lock, flags);
988
3478
switch (action) {
3479
case CPU_DOWN_PREPARE:
3480
/* initialize trustee and tell it to acquire the gcwq */
3481
BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3482
gcwq->trustee = new_trustee;
3483
gcwq->trustee_state = TRUSTEE_START;
3484
wake_up_process(gcwq->trustee);
3485
wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3487
case CPU_UP_PREPARE:
3488
BUG_ON(gcwq->first_idle);
3489
gcwq->first_idle = new_worker;
3494
* Before this, the trustee and all workers except for
3495
* the ones which are still executing works from
3496
* before the last CPU down must be on the cpu. After
3497
* this, they'll all be diasporas.
3499
gcwq->flags |= GCWQ_DISASSOCIATED;
3503
gcwq->trustee_state = TRUSTEE_BUTCHER;
989
3505
case CPU_UP_CANCELED:
991
cpumask_clear_cpu(cpu, cpu_populated_map);
3506
destroy_worker(gcwq->first_idle);
3507
gcwq->first_idle = NULL;
3510
case CPU_DOWN_FAILED:
3512
gcwq->flags &= ~GCWQ_DISASSOCIATED;
3513
if (gcwq->trustee_state != TRUSTEE_DONE) {
3514
gcwq->trustee_state = TRUSTEE_RELEASE;
3515
wake_up_process(gcwq->trustee);
3516
wait_trustee_state(gcwq, TRUSTEE_DONE);
3520
* Trustee is done and there might be no worker left.
3521
* Put the first_idle in and request a real manager to
3524
spin_unlock_irq(&gcwq->lock);
3525
kthread_bind(gcwq->first_idle->task, cpu);
3526
spin_lock_irq(&gcwq->lock);
3527
gcwq->flags |= GCWQ_MANAGE_WORKERS;
3528
start_worker(gcwq->first_idle);
3529
gcwq->first_idle = NULL;
3533
spin_unlock_irqrestore(&gcwq->lock, flags);
3535
return notifier_from_errno(0);
997
3538
#ifdef CONFIG_SMP