static struct completion rcu_barrier_completion;
int rcu_scheduler_active __read_mostly;

static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
static struct rcu_head rcu_migrate_head[3];
static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
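
/*
 * State used to make _rcu_barrier() wait out callbacks migrating from a
 * dying CPU: one rcu_head per RCU flavor (RCU, RCU-bh, RCU-sched).
 * rcu_barrier_cpu_hotplug() below posts one callback of each flavor at
 * CPU_DYING time, and wait_migrated_callbacks() sleeps on rcu_migrate_wq
 * until all three have run.
 */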

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
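
/*
 * For illustration only (not part of this file): a minimal sketch of how
 * a synchronize_rcu()-style caller pairs with wakeme_after_rcu(), using
 * the rcu_head/completion pair in struct rcu_synchronize from
 * <linux/rcupdate.h>.  The function name is hypothetical.
 */
#if 0	/* illustrative sketch */
void example_synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Queue a callback that fires once a grace period has elapsed. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Sleep until wakeme_after_rcu() signals the completion. */
	wait_for_completion(&rcu.completion);
}
#endif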

static inline void wait_migrated_callbacks(void)
{
	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
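
/*
 * The body of _rcu_barrier() is elided from this excerpt.  A minimal
 * sketch of the orchestration it performs, assuming the rcu_barrier_mutex,
 * rcu_barrier_cpu_count, and rcu_barrier_func() helpers that accompany
 * rcu_barrier_completion above; not the verbatim implementation.
 */
#if 0	/* illustrative sketch */
static void _rcu_barrier(enum rcu_barrier type)
{
	BUG_ON(in_interrupt());
	/* Exclude CPU hotplug while the barrier is in flight. */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	/*
	 * Start the count at 1 so that no CPU's decrement can reach zero
	 * (and signal completion) before every CPU has incremented it.
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	on_each_cpu(rcu_barrier_func, (void *)type, 1);
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
	wait_migrated_callbacks();
}
#endif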

EXPORT_SYMBOL_GPL(rcu_barrier_sched);

static void rcu_migrate_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_migrate_type_count))
		wake_up(&rcu_migrate_wq);
}

static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
		unsigned long action, void *hcpu)
{
	if (action == CPU_DYING) {
		/*
		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
		 * returns, all online cpus have queued rcu_barrier_func(),
		 * and the dead cpu (if it exists) has queued its
		 * rcu_migrate_callback()s.
		 *
		 * These callbacks ensure _rcu_barrier() waits for all
		 * RCU callbacks of the specified type to complete.
		 */
		atomic_set(&rcu_migrate_type_count, 3);
		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
	} else if (action == CPU_POST_DEAD) {
		/* rcu_migrate_head is protected by cpu_add_remove_lock */
		wait_migrated_callbacks();
	}

	return NOTIFY_OK;
}
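
/*
 * rcu_barrier_func(), named in the comment above, is also elided from
 * this excerpt.  A sketch under the assumption of a per-CPU
 * rcu_barrier_head and an rcu_barrier_callback() that decrements
 * rcu_barrier_cpu_count and completes rcu_barrier_completion when the
 * count reaches zero.
 */
#if 0	/* illustrative sketch */
static void rcu_barrier_func(void *type)
{
	struct rcu_head *head = &per_cpu(rcu_barrier_head, smp_processor_id());

	/* Count this CPU in, then queue a callback of the requested flavor. */
	atomic_inc(&rcu_barrier_cpu_count);
	switch ((enum rcu_barrier)type) {
	case RCU_BARRIER_STD:
		call_rcu(head, rcu_barrier_callback);
		break;
	case RCU_BARRIER_BH:
		call_rcu_bh(head, rcu_barrier_callback);
		break;
	case RCU_BARRIER_SCHED:
		call_rcu_sched(head, rcu_barrier_callback);
		break;
	}
}
#endif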

void __init rcu_init(void)
{
	__rcu_init();
	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
}

void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}