/* kernel/cpu.c - CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpu_add_remove_lock);
static DEFINE_MUTEX(cpu_bitmask_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
static struct task_struct *recursive;
static int recursive_depth;

void lock_cpu_hotplug(void)
{
	struct task_struct *tsk = current;

	if (tsk == recursive) {
		static int warnings = 10;
		if (warnings) {
			printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
			WARN_ON(1);
			warnings--;
		}
		recursive_depth++;
		return;
	}
	mutex_lock(&cpu_bitmask_lock);
	recursive = tsk;
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
	WARN_ON(recursive != current);
	if (recursive_depth) {
		recursive_depth--;
		return;
	}
	recursive = NULL;
	mutex_unlock(&cpu_bitmask_lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
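
/*
 * Typical usage (illustrative sketch only, not part of this file): hold the
 * hotplug lock while walking the online map so CPUs cannot come or go
 * mid-iteration; do_per_cpu_work() is a hypothetical helper.
 *
 *	lock_cpu_hotplug();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	unlock_cpu_hotplug();
 */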

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	mutex_lock(&cpu_add_remove_lock);
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	mutex_lock(&cpu_add_remove_lock);
	raw_notifier_chain_unregister(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
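
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem that
 * needs per-CPU setup/teardown registers a notifier.  foo_init_cpu() and
 * foo_exit_cpu() are hypothetical helpers.
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_ONLINE:
 *		case CPU_ONLINE_FROZEN:
 *			foo_init_cpu(cpu);
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			foo_exit_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */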

/* Warn about any user tasks that are still on a CPU that has gone down. */
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, p->pid, cpu, p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down: runs on the dying CPU itself, with the rest of the
 * machine held in __stop_machine_run(). */
static int take_cpu_down(void *unused)
{
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held.  tasks_frozen is non-zero on the
 * suspend path; it ORs CPU_TASKS_FROZEN into the notifier events so that
 * callbacks can tell suspend-time hotplug from ordinary hotplug. */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
				__FUNCTION__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on the dying CPU */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	mutex_lock(&cpu_bitmask_lock);
	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	mutex_unlock(&cpu_bitmask_lock);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out_release:
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_down(cpu, 0);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
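
/*
 * Note: userspace normally reaches cpu_down() through the sysfs online
 * attribute, e.g. "echo 0 > /sys/devices/system/cpu/cpu1/online".
 */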
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		printk(KERN_ERR "%s: attempt to bring up CPU %u failed\n",
				__FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	mutex_lock(&cpu_bitmask_lock);
	ret = __cpu_up(cpu);
	mutex_unlock(&cpu_bitmask_lock);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_up(cpu, 0);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
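
/*
 * Note: as with cpu_down(), userspace normally reaches cpu_up() through
 * sysfs, e.g. "echo 1 > /sys/devices/system/cpu/cpu1/online".
 */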

#ifdef CONFIG_SUSPEND_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	mutex_lock(&cpu_add_remove_lock);
	first_cpu = first_cpu(cpu_online_map);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpus_clear(frozen_cpus);
	printk(KERN_INFO "Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk(KERN_INFO "CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	mutex_unlock(&cpu_add_remove_lock);
	return error;
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	mutex_lock(&cpu_add_remove_lock);
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	mutex_unlock(&cpu_add_remove_lock);
}
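
/*
 * Usage sketch (illustrative only, not part of this file): the suspend core
 * brackets its critical section with these calls, so everything in between
 * runs on the boot CPU alone.
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... suspend work on the boot CPU ...
 *	}
 *	enable_nonboot_cpus();
 */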
#endif /* CONFIG_SUSPEND_SMP */
326