xref: /openbmc/linux/kernel/stop_machine.c (revision 75f25bd3)
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>

#include <linux/atomic.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
	struct task_struct	*thread;	/* stopper thread */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(struct cpu_stopper *stopper,
				struct cpu_stop_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority, preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online until @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

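/*
 * Example (illustrative sketch, not part of the original file): read a
 * cpu-local value on CPU 2 with every other task on that cpu held off.
 * read_counter_fn() and read_local_counter() are hypothetical names.
 *
 *	static int read_counter_fn(void *arg)
 *	{
 *		u64 *val = arg;
 *
 *		*val = read_local_counter();	// runs on CPU 2, preempt off
 *		return 0;
 *	}
 *
 *	u64 val;
 *	int err = stop_one_cpu(2, read_counter_fn, &val);
 *
 * err is -ENOENT if CPU 2 was offline and the callback never ran there.
 */
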
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
}

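/*
 * Example (illustrative sketch, not part of the original file): the
 * caller owns a pre-allocated work buffer, typically embedded in a
 * longer-lived object, and fires the callback without waiting.
 * struct my_dev, my_stop_fn() and target_cpu are hypothetical names.
 *
 *	struct my_dev {
 *		struct cpu_stop_work stop_work;	// must stay untouched
 *		...				// until the stopper runs
 *	};
 *
 *	stop_one_cpu_nowait(target_cpu, my_stop_fn, dev, &dev->stop_work);
 *
 * The scheduler's active load balancing follows this pattern with a
 * per-runqueue cpu_stop_work.
 */
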
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn, which can lead to deadlock.
	 */
	preempt_disable();
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
				    &per_cpu(stop_cpus_work, cpu));
	preempt_enable();
}

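/*
 * Illustrative two-cpu sketch (added note) of the deadlock the
 * preempt_disable() section above prevents, assuming a multi-cpu
 * callback that waits for all stoppers to enter @fn:
 *
 *	queueing task (on CPU0)
 *	  queue work for CPU0
 *	  CPU0's stopper preempts us, runs @fn and spins
 *	  waiting for CPU1's stopper to enter @fn ...
 *	  ... but CPU1's work never gets queued: deadlock
 *
 * stop_machine_cpu_stop() below is exactly such a callback.
 */
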
static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority,
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * until @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized, making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or a non-zero value if any execution returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

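/*
 * Example (illustrative sketch, not part of the original file): run a
 * callback simultaneously on cpus 1 and 2.  sync_fn() and
 * do_local_sync() are hypothetical names.
 *
 *	static int sync_fn(void *arg)
 *	{
 *		// runs on each targeted cpu with preemption disabled;
 *		// serialization makes it safe to wait for the peer cpus
 *		return do_local_sync(arg);
 *	}
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(1, mask);
 *	cpumask_set_cpu(2, mask);
 *	err = stop_cpus(mask, sync_fn, NULL);
 *	free_cpumask_var(mask);
 */
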
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or a
 * non-zero value if any execution returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

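/*
 * Example (illustrative sketch, not part of the original file): callers
 * that must not block behind a concurrent stop_cpus() retry or fall
 * back on -EAGAIN.  my_fn() is a hypothetical name.
 *
 *	for (;;) {
 *		ret = try_stop_cpus(cpu_online_mask, my_fn, NULL);
 *		if (ret != -EAGAIN)
 *			break;
 *		cpu_relax();	// or back off, or take a slower path
 *	}
 */
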
static int cpu_stopper_thread(void *data)
{
	struct cpu_stopper *stopper = data;
	struct cpu_stop_work *work;
	int ret;

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		__set_current_state(TASK_RUNNING);

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
	} else
		schedule();

	goto repeat;
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
					   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		BUG_ON(stopper->thread || stopper->enabled ||
		       !list_empty(&stopper->works));
		p = kthread_create_on_node(cpu_stopper_thread,
					   stopper,
					   cpu_to_node(cpu),
					   "migration/%d", cpu);
		if (IS_ERR(p))
			return notifier_from_errno(PTR_ERR(p));
		get_task_struct(p);
		kthread_bind(p, cpu);
		sched_set_stop_task(cpu, p);
		stopper->thread = p;
		break;

	case CPU_ONLINE:
		/* strictly unnecessary, as first user will wake it */
		wake_up_process(stopper->thread);
		/* mark enabled */
		spin_lock_irq(&stopper->lock);
		stopper->enabled = true;
		spin_unlock_irq(&stopper->lock);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
	{
		struct cpu_stop_work *work;

		sched_set_stop_task(cpu, NULL);
		/* kill the stopper */
		kthread_stop(stopper->thread);
		/* drain remaining works */
		spin_lock_irq(&stopper->lock);
		list_for_each_entry(work, &stopper->works, list)
			cpu_stop_signal_done(work->done, false);
		stopper->enabled = false;
		spin_unlock_irq(&stopper->lock);
		/* release the stopper */
		put_task_struct(stopper->thread);
		stopper->thread = NULL;
		break;
	}
#endif
	}

	return NOTIFY_OK;
}

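/*
 * Lifecycle summary (added note, derived from the callback above):
 * CPU_UP_PREPARE creates the "migration/N" kthread, binds it to the cpu
 * and registers it as the cpu's stop task; CPU_ONLINE flips ->enabled
 * so works get queued instead of failed; CPU_UP_CANCELED/CPU_POST_DEAD
 * stop the kthread, complete all still-pending works with
 * executed == false (waiters then see -ENOENT) and drop the task
 * reference.
 */
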
/*
 * Give it a higher priority so that the cpu stopper is available to
 * other cpu notifiers.  It currently shares the same priority as the
 * sched migration_notifier.
 */
static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
	.notifier_call	= cpu_stop_cpu_callback,
	.priority	= 10,
};

static int __init cpu_stop_init(void)
{
	void *bcpu = (void *)(long)smp_processor_id();
	unsigned int cpu;
	int err;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	/* start one for the boot cpu */
	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
				    bcpu);
	BUG_ON(err != NOTIFY_OK);
	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
	register_cpu_notifier(&cpu_stop_cpu_notifier);

	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};

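/*
 * Added note (derived from set_state()/ack_state() below): the global
 * state only ever advances, one step per round:
 *
 *	PREPARE -> DISABLE_IRQ -> RUN -> EXIT
 *
 * Each thread starts from its local dummy NONE and follows along, and
 * the last thread to ack a state advances everyone to the next one, so
 * no cpu can run @fn before all cpus have irqs disabled and none can
 * leave before all cpus have passed the RUN step.
 */
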
struct stop_machine_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but the cpu hotplug code uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum stopmachine_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct stop_machine_data *smdata,
		      enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&smdata->thread_ack, smdata->num_threads);
	smp_wmb();	/* ensure the reset is visible before the new state */
	smdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct stop_machine_data *smdata)
{
	if (atomic_dec_and_test(&smdata->thread_ack))
		set_state(smdata, smdata->state + 1);
}

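/*
 * Worked example (added note): with num_threads == 4, set_state() arms
 * thread_ack at 4.  Each participating cpu calls ack_state() exactly
 * once per state; the fourth decrement reaches zero and that cpu
 * advances ->state, which re-arms the counter for the next round.
 */
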
/* This is the cpu_stop function which stops the CPU. */
static int stop_machine_cpu_stop(void *data)
{
	struct stop_machine_data *smdata = data;
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irqs might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!smdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, smdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (smdata->state != curstate) {
			curstate = smdata->state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				if (is_active)
					err = smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state(smdata);
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_restore(flags);
	return err;
}

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct stop_machine_data smdata = { .fn = fn, .data = data,
					    .num_threads = num_online_cpus(),
					    .active_cpus = cpus };

	/* Set the initial state and stop all online cpus. */
	set_state(&smdata, STOPMACHINE_PREPARE);
	return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

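/*
 * Example (illustrative sketch, not part of the original file): apply a
 * change no other cpu may observe half-done, e.g. rewriting kernel
 * text.  patch_fn(), apply_patch() and &my_patch are hypothetical
 * names.
 *
 *	static int patch_fn(void *arg)
 *	{
 *		// every online cpu is spinning with irqs disabled in
 *		// stop_machine_cpu_stop(); only the first online cpu
 *		// (or the cpus in @cpus, if given) runs this
 *		apply_patch(arg);
 *		return 0;
 *	}
 *
 *	err = stop_machine(patch_fn, &my_patch, NULL);
 */
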
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), is not marked active, and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly on the
 * local CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non-zero return value if any
 * returned non-zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				  const struct cpumask *cpus)
{
	struct stop_machine_data smdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	smdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&smdata, STOPMACHINE_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
			     &done);
	ret = stop_machine_cpu_stop(&smdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

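/*
 * Added note (hedged): the function above exists for early cpu bring-up
 * paths that must rendezvous with the active cpus before the incoming
 * cpu is marked active; the x86 MTRR sync code is, to our knowledge,
 * the caller it was written for.  Since the local cpu cannot sleep
 * here, both the mutex and the completion are busy-waited on.
 */
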
#endif	/* CONFIG_STOP_MACHINE */