/* xref: /openbmc/linux/kernel/stop_machine.c
 * (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
 */
/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};
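/*
 * The states advance strictly in the order declared above: every thread
 * acks PREPARE, then DISABLE_IRQ, then RUN, then EXIT, in lockstep with
 * the others (see set_state()/ack_state() below).
 */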
static enum stopmachine_state state;

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	int fnret;
};

/* Like num_online_cpus(), but CPU hotplug uses us, so we cache the count. */
static unsigned int num_threads;
static atomic_t thread_ack;
static struct completion finished;
static DEFINE_MUTEX(lock);

static void set_state(enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&thread_ack, num_threads);
	smp_wmb();
	state = newstate;
}
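/*
 * The smp_wmb() orders the thread_ack reset before the state change, so
 * a thread that observes the new state also sees a freshly re-armed
 * counter (atomic_dec_and_test() in ack_state() implies the pairing
 * barrier on the reader side).
 */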

/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
	if (atomic_dec_and_test(&thread_ack)) {
		/* If we're the last one to ack the EXIT, we're finished. */
		if (state == STOPMACHINE_EXIT)
			complete(&finished);
		else
			set_state(state + 1);
	}
}
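/*
 * For example, with num_threads == 3 the first two threads to call
 * ack_state() merely decrement thread_ack (3 -> 2 -> 1); the third
 * takes it to zero and advances the state, re-arming the counter for
 * the next round.  Each state is thus a barrier that all the stop
 * threads cross together.
 */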

/* This is the actual thread which stops the CPU.  It exits by itself rather
 * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
static int stop_cpu(struct stop_machine_data *smdata)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* |= allows error detection when fn runs
				 * on multiple CPUs. */
				smdata->fnret |= smdata->fn(smdata->data);
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
	do_exit(0);
}
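/*
 * Illustrative timeline for two online CPUs, one running the caller's
 * fn and one idling (a sketch, not from the original source):
 *
 *	CPU0 (active)			CPU1 (idle)
 *	ack PREPARE			ack PREPARE
 *	irqs off, ack DISABLE_IRQ	irqs off, ack DISABLE_IRQ
 *	fn(data), ack RUN		chill(), ack RUN
 *	ack EXIT, irqs on, exit		ack EXIT, irqs on, exit
 */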

/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}

int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	int i, err;
	struct stop_machine_data active, idle;
	struct task_struct **threads;

	active.fn = fn;
	active.data = data;
	active.fnret = 0;
	idle.fn = chill;
	idle.data = NULL;

	/* This could be too big for stack on large machines. */
	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/* Set up initial state. */
	mutex_lock(&lock);
	init_completion(&finished);
	num_threads = num_online_cpus();
	set_state(STOPMACHINE_PREPARE);

	for_each_online_cpu(i) {
		struct stop_machine_data *smdata = &idle;
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		if (!cpus) {
			if (i == first_cpu(cpu_online_map))
				smdata = &active;
		} else {
			if (cpu_isset(i, *cpus))
				smdata = &active;
		}

		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
					    i);
		if (IS_ERR(threads[i])) {
			err = PTR_ERR(threads[i]);
			threads[i] = NULL;
			goto kill_threads;
		}

		/* Place it onto correct cpu. */
		kthread_bind(threads[i], i);

		/* Make it highest prio. */
		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
			BUG();
	}

	/* We've created all the threads.  Wake them all: get_cpu() disables
	 * preemption, so the stop thread bound to this CPU cannot run until
	 * we're ready for it. */
	get_cpu();
	for_each_online_cpu(i)
		wake_up_process(threads[i]);

	/* This will release the thread on our CPU. */
	put_cpu();
	wait_for_completion(&finished);
	mutex_unlock(&lock);

	kfree(threads);

	return active.fnret;

kill_threads:
	for_each_online_cpu(i)
		if (threads[i])
			kthread_stop(threads[i]);
	mutex_unlock(&lock);

	kfree(threads);
	return err;
}
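/*
 * Note: __stop_machine() trusts its caller to have excluded CPU hotplug
 * already (hotplug itself calls in this way); everyone else should use
 * stop_machine() below, which takes the hotplug lock for you.
 */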

int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();

	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
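
/*
 * Example usage: a compile-guarded sketch, not part of the original
 * file; the module and all names below are hypothetical.  fn runs with
 * every other online CPU spinning with interrupts hard-disabled, so it
 * must not sleep, take sleeping locks, or touch userspace.
 */
#if 0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

/* Runs on one CPU while the rest of the machine is stopped. */
static int bump_counter(void *data)
{
	int *counter = data;

	(*counter)++;
	return 0;	/* becomes stop_machine()'s return value */
}

static int __init stopm_example_init(void)
{
	int counter = 0;
	int err;

	/* NULL cpumask: run bump_counter on the first online CPU. */
	err = stop_machine(bump_counter, &counter, NULL);
	printk(KERN_INFO "stop_machine: err=%d counter=%d\n", err, counter);
	return err;
}

static void __exit stopm_example_exit(void)
{
}

module_init(stopm_example_init);
module_exit(stopm_example_exit);
MODULE_LICENSE("GPL");
#endif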