/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
	struct task_struct	*thread;	/* stopper thread */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(struct cpu_stopper *stopper,
				struct cpu_stop_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority, preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online until @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
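
/*
 * Example usage (illustrative sketch, not part of the original file;
 * the callback and variables are hypothetical).  The callback runs
 * with preemption disabled and must not sleep:
 *
 *	static int reset_counter_fn(void *arg)
 *	{
 *		unsigned long *counter = arg;
 *
 *		*counter = 0;		// runs alone on the target cpu
 *		return 0;
 *	}
 *
 *	// From sleepable context; -ENOENT if cpu 2 was offline.
 *	err = stop_one_cpu(2, reset_counter_fn, &my_counter);
 */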

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
}
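
/*
 * Example (hypothetical sketch): fire-and-forget variant.  Because
 * nothing waits for the request, @work_buf must outlive it; a static
 * or per-cpu buffer is the usual choice:
 *
 *	static struct cpu_stop_work resync_work;
 *
 *	// Queue resync_fn on cpu 1 and return immediately.
 *	stop_one_cpu_nowait(1, resync_fn, NULL, &resync_work);
 */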

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_work *work;
	struct cpu_stop_done done;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = &done;
	}
	cpu_stop_init_done(&done, cpumask_weight(cpumask));

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn, which can lead to deadlock.
	 */
	preempt_disable();
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
				    &per_cpu(stop_cpus_work, cpu));
	preempt_enable();

	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority,
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * until @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or any non-zero value if any execution returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
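
/*
 * Example (hypothetical sketch; the callback and helper are made up
 * for illustration): drain a per-cpu buffer on cpus 0-1 at once.
 * Each cpu runs the callback on itself while every other task on
 * those cpus is held off:
 *
 *	static struct cpumask mask;
 *
 *	static int drain_local_fn(void *unused)
 *	{
 *		// Executes on each target cpu with preemption off.
 *		drain_this_cpus_buffer();	// hypothetical helper
 *		return 0;
 *	}
 *
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(0, &mask);
 *	cpumask_set_cpu(1, &mask);
 *	err = stop_cpus(&mask, drain_local_fn, NULL);
 */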

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or any
 * non-zero value if any execution returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
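
/*
 * Example (sketch): a caller that must not block behind another
 * stop_cpus() user can retry on -EAGAIN instead of sleeping on the
 * mutex:
 *
 *	do {
 *		ret = try_stop_cpus(cpu_online_mask, fn, arg);
 *	} while (ret == -EAGAIN);
 */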

static int cpu_stopper_thread(void *data)
{
	struct cpu_stopper *stopper = data;
	struct cpu_stop_work *work;
	int ret;

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN];

		__set_current_state(TASK_RUNNING);

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
	} else
		schedule();

	goto repeat;
}

/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		BUG_ON(stopper->thread || stopper->enabled ||
		       !list_empty(&stopper->works));
		p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
				   cpu);
		if (IS_ERR(p))
			return NOTIFY_BAD;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		get_task_struct(p);
		stopper->thread = p;
		break;

	case CPU_ONLINE:
		kthread_bind(stopper->thread, cpu);
		/* strictly unnecessary, as the first user will wake it */
		wake_up_process(stopper->thread);
		/* mark enabled */
		spin_lock_irq(&stopper->lock);
		stopper->enabled = true;
		spin_unlock_irq(&stopper->lock);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
	{
		struct cpu_stop_work *work;

		/* kill the stopper */
		kthread_stop(stopper->thread);
		/* drain remaining works */
		spin_lock_irq(&stopper->lock);
		list_for_each_entry(work, &stopper->works, list)
			cpu_stop_signal_done(work->done, false);
		stopper->enabled = false;
		spin_unlock_irq(&stopper->lock);
		/* release the stopper */
		put_task_struct(stopper->thread);
		stopper->thread = NULL;
		break;
	}
#endif
	}

	return NOTIFY_OK;
}

/*
 * Give it a higher priority so that the cpu stopper is available to
 * other cpu notifiers.  It currently shares the same priority as the
 * sched migration_notifier.
 */
static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
	.notifier_call	= cpu_stop_cpu_callback,
	.priority	= 10,
};

static int __init cpu_stop_init(void)
{
	void *bcpu = (void *)(long)smp_processor_id();
	unsigned int cpu;
	int err;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	/* start one for the boot cpu */
	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
				    bcpu);
	BUG_ON(err == NOTIFY_BAD);
	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
	register_cpu_notifier(&cpu_stop_cpu_notifier);

	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Waiting for everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function. */
	STOPMACHINE_RUN,
	/* Exit. */
	STOPMACHINE_EXIT,
};
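
/*
 * Every stopper thread walks the states above in order.  set_state()
 * below arms a countdown of num_threads acks; the last thread to ack
 * the current state (ack_state()) advances everyone to the next one,
 * so all cpus move in lockstep:
 *
 *	PREPARE     : all stopper threads are running, nothing else is
 *	DISABLE_IRQ : interrupts are now off on every cpu
 *	RUN         : active cpus call fn(data), the rest spin
 *	EXIT        : everyone re-enables interrupts and returns
 */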

struct stop_machine_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum stopmachine_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct stop_machine_data *smdata,
		      enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&smdata->thread_ack, smdata->num_threads);
	smp_wmb();
	smdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct stop_machine_data *smdata)
{
	if (atomic_dec_and_test(&smdata->thread_ack))
		set_state(smdata, smdata->state + 1);
}
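
/*
 * Worked example with num_threads == 2: in STOPMACHINE_PREPARE,
 * thread_ack starts at 2.  cpu0 acks (thread_ack -> 1), then cpu1
 * acks (thread_ack -> 0) and, being last, calls set_state(), which
 * re-arms thread_ack to 2 and publishes STOPMACHINE_DISABLE_IRQ.
 * The smp_wmb() in set_state() orders the counter reset before the
 * new state becomes visible to the spinning threads.
 */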

/* This is the cpu_stop function which stops the CPU. */
static int stop_machine_cpu_stop(void *data)
{
	struct stop_machine_data *smdata = data;
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	int cpu = smp_processor_id(), err = 0;
	bool is_active;

	if (!smdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, smdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (smdata->state != curstate) {
			curstate = smdata->state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				if (is_active)
					err = smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state(smdata);
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
	return err;
}

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct stop_machine_data smdata = { .fn = fn, .data = data,
					    .num_threads = num_online_cpus(),
					    .active_cpus = cpus };

	/* Set the initial state and stop all online cpus. */
	set_state(&smdata, STOPMACHINE_PREPARE);
	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
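
/*
 * Example usage (hypothetical sketch; the type and variables are made
 * up for illustration).  With cpus == NULL only the first online cpu
 * runs the callback while every other cpu spins with interrupts off,
 * so nothing can observe the patch half-done:
 *
 *	struct text_patch { void *addr; u8 insn[8]; size_t len; };
 *
 *	static int apply_patch(void *arg)
 *	{
 *		struct text_patch *p = arg;
 *
 *		// The whole machine is quiesced here.  Must not sleep.
 *		memcpy(p->addr, p->insn, p->len);
 *		return 0;
 *	}
 *
 *	err = stop_machine(apply_patch, &patch, NULL);
 */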

#endif	/* CONFIG_STOP_MACHINE */