xref: /openbmc/linux/kernel/cpu.c (revision 83268fa6)
1 /* CPU control.
2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
3  *
4  * This code is licenced under the GPL.
5  */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/hotplug.h>
12 #include <linux/sched/task.h>
13 #include <linux/unistd.h>
14 #include <linux/cpu.h>
15 #include <linux/oom.h>
16 #include <linux/rcupdate.h>
17 #include <linux/export.h>
18 #include <linux/bug.h>
19 #include <linux/kthread.h>
20 #include <linux/stop_machine.h>
21 #include <linux/mutex.h>
22 #include <linux/gfp.h>
23 #include <linux/suspend.h>
24 #include <linux/lockdep.h>
25 #include <linux/tick.h>
26 #include <linux/irq.h>
27 #include <linux/nmi.h>
28 #include <linux/smpboot.h>
29 #include <linux/relay.h>
30 #include <linux/slab.h>
31 #include <linux/percpu-rwsem.h>
32 
33 #include <trace/events/power.h>
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/cpuhp.h>
36 
37 #include "smpboot.h"
38 
39 /**
40  * struct cpuhp_cpu_state - Per cpu hotplug state storage
41  * @state:	The current cpu state
42  * @target:	The target state
43  * @thread:	Pointer to the hotplug thread
44  * @should_run:	Thread should execute
45  * @rollback:	Perform a rollback
46  * @single:	Single callback invocation
47  * @bringup:	Single callback bringup or teardown selector
48  * @cb_state:	The state for a single callback (install/uninstall)
49  * @result:	Result of the operation
50  * @done_up:	Signal completion to the issuer of the task for cpu-up
51  * @done_down:	Signal completion to the issuer of the task for cpu-down
52  */
53 struct cpuhp_cpu_state {
54 	enum cpuhp_state	state;
55 	enum cpuhp_state	target;
56 	enum cpuhp_state	fail;
57 #ifdef CONFIG_SMP
58 	struct task_struct	*thread;
59 	bool			should_run;
60 	bool			rollback;
61 	bool			single;
62 	bool			bringup;
63 	bool			booted_once;
64 	struct hlist_node	*node;
65 	struct hlist_node	*last;
66 	enum cpuhp_state	cb_state;
67 	int			result;
68 	struct completion	done_up;
69 	struct completion	done_down;
70 #endif
71 };
72 
73 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
74 	.fail = CPUHP_INVALID,
75 };
76 
77 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
78 static struct lockdep_map cpuhp_state_up_map =
79 	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
80 static struct lockdep_map cpuhp_state_down_map =
81 	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
82 
83 
84 static inline void cpuhp_lock_acquire(bool bringup)
85 {
86 	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
87 }
88 
89 static inline void cpuhp_lock_release(bool bringup)
90 {
91 	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
92 }
93 #else
94 
95 static inline void cpuhp_lock_acquire(bool bringup) { }
96 static inline void cpuhp_lock_release(bool bringup) { }
97 
98 #endif
99 
100 /**
101  * struct cpuhp_step - Hotplug state machine step
102  * @name:	Name of the step
103  * @startup:	Startup function of the step
104  * @teardown:	Teardown function of the step
105  * @cant_stop:	Bringup/teardown can't be stopped at this step
106  */
107 struct cpuhp_step {
108 	const char		*name;
109 	union {
110 		int		(*single)(unsigned int cpu);
111 		int		(*multi)(unsigned int cpu,
112 					 struct hlist_node *node);
113 	} startup;
114 	union {
115 		int		(*single)(unsigned int cpu);
116 		int		(*multi)(unsigned int cpu,
117 					 struct hlist_node *node);
118 	} teardown;
119 	struct hlist_head	list;
120 	bool			cant_stop;
121 	bool			multi_instance;
122 };
123 
124 static DEFINE_MUTEX(cpuhp_state_mutex);
125 static struct cpuhp_step cpuhp_hp_states[];
126 
127 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
128 {
129 	return cpuhp_hp_states + state;
130 }
131 
132 /**
133  * cpuhp_invoke_callback - Invoke the callbacks for a given state
134  * @cpu:	The cpu for which the callback should be invoked
135  * @state:	The state to do callbacks for
136  * @bringup:	True if the bringup callback should be invoked
137  * @node:	For multi-instance, do a single entry callback for install/remove
138  * @lastp:	For multi-instance rollback, remember how far we got
139  *
140  * Called from cpu hotplug and from the state register machinery.
141  */
142 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
143 				 bool bringup, struct hlist_node *node,
144 				 struct hlist_node **lastp)
145 {
146 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
147 	struct cpuhp_step *step = cpuhp_get_step(state);
148 	int (*cbm)(unsigned int cpu, struct hlist_node *node);
149 	int (*cb)(unsigned int cpu);
150 	int ret, cnt;
151 
152 	if (st->fail == state) {
153 		st->fail = CPUHP_INVALID;
154 
155 		if (!(bringup ? step->startup.single : step->teardown.single))
156 			return 0;
157 
158 		return -EAGAIN;
159 	}
160 
161 	if (!step->multi_instance) {
162 		WARN_ON_ONCE(lastp && *lastp);
163 		cb = bringup ? step->startup.single : step->teardown.single;
164 		if (!cb)
165 			return 0;
166 		trace_cpuhp_enter(cpu, st->target, state, cb);
167 		ret = cb(cpu);
168 		trace_cpuhp_exit(cpu, st->state, state, ret);
169 		return ret;
170 	}
171 	cbm = bringup ? step->startup.multi : step->teardown.multi;
172 	if (!cbm)
173 		return 0;
174 
175 	/* Single invocation for instance add/remove */
176 	if (node) {
177 		WARN_ON_ONCE(lastp && *lastp);
178 		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
179 		ret = cbm(cpu, node);
180 		trace_cpuhp_exit(cpu, st->state, state, ret);
181 		return ret;
182 	}
183 
184 	/* State transition. Invoke on all instances */
185 	cnt = 0;
186 	hlist_for_each(node, &step->list) {
187 		if (lastp && node == *lastp)
188 			break;
189 
190 		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
191 		ret = cbm(cpu, node);
192 		trace_cpuhp_exit(cpu, st->state, state, ret);
193 		if (ret) {
194 			if (!lastp)
195 				goto err;
196 
197 			*lastp = node;
198 			return ret;
199 		}
200 		cnt++;
201 	}
202 	if (lastp)
203 		*lastp = NULL;
204 	return 0;
205 err:
206 	/* Rollback the instances if one failed */
207 	cbm = !bringup ? step->startup.multi : step->teardown.multi;
208 	if (!cbm)
209 		return ret;
210 
211 	hlist_for_each(node, &step->list) {
212 		if (!cnt--)
213 			break;
214 
215 		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216 		ret = cbm(cpu, node);
217 		trace_cpuhp_exit(cpu, st->state, state, ret);
218 		/*
219 		 * Rollback must not fail!
220 		 */
221 		WARN_ON_ONCE(ret);
222 	}
223 	return ret;
224 }
225 
226 #ifdef CONFIG_SMP
227 static bool cpuhp_is_ap_state(enum cpuhp_state state)
228 {
229 	/*
230 	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
231 	 * purposes as that state is handled explicitly in cpu_down.
232 	 */
233 	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
234 }
235 
236 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
237 {
238 	struct completion *done = bringup ? &st->done_up : &st->done_down;
239 	wait_for_completion(done);
240 }
241 
242 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
243 {
244 	struct completion *done = bringup ? &st->done_up : &st->done_down;
245 	complete(done);
246 }
247 
248 /*
249  * The former STARTING/DYING states run with IRQs disabled and must not fail.
250  */
251 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
252 {
253 	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
254 }
255 
256 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
257 static DEFINE_MUTEX(cpu_add_remove_lock);
258 bool cpuhp_tasks_frozen;
259 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
260 
261 /*
262  * The following two APIs (cpu_maps_update_begin/done) must be used when
263  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
264  */
265 void cpu_maps_update_begin(void)
266 {
267 	mutex_lock(&cpu_add_remove_lock);
268 }
269 
270 void cpu_maps_update_done(void)
271 {
272 	mutex_unlock(&cpu_add_remove_lock);
273 }
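/*
 * Illustrative sketch, not part of this file: a caller that needs a stable
 * view of the present mask across several operations brackets them with the
 * pair above. sync_my_cpu_view() is a hypothetical helper.
 *
 *	cpu_maps_update_begin();
 *	for_each_present_cpu(cpu)
 *		sync_my_cpu_view(cpu);
 *	cpu_maps_update_done();
 */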
274 
275 /*
276  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
277  * Should always be manipulated under cpu_add_remove_lock
278  */
279 static int cpu_hotplug_disabled;
280 
281 #ifdef CONFIG_HOTPLUG_CPU
282 
283 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
284 
285 void cpus_read_lock(void)
286 {
287 	percpu_down_read(&cpu_hotplug_lock);
288 }
289 EXPORT_SYMBOL_GPL(cpus_read_lock);
290 
291 int cpus_read_trylock(void)
292 {
293 	return percpu_down_read_trylock(&cpu_hotplug_lock);
294 }
295 EXPORT_SYMBOL_GPL(cpus_read_trylock);
296 
297 void cpus_read_unlock(void)
298 {
299 	percpu_up_read(&cpu_hotplug_lock);
300 }
301 EXPORT_SYMBOL_GPL(cpus_read_unlock);
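/*
 * Illustrative sketch, not part of this file: the read side is used to keep
 * the set of online CPUs stable while iterating over it. do_per_cpu_setup()
 * is a hypothetical helper.
 *
 *	unsigned int cpu;
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_setup(cpu);
 *	cpus_read_unlock();
 */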
302 
303 void cpus_write_lock(void)
304 {
305 	percpu_down_write(&cpu_hotplug_lock);
306 }
307 
308 void cpus_write_unlock(void)
309 {
310 	percpu_up_write(&cpu_hotplug_lock);
311 }
312 
313 void lockdep_assert_cpus_held(void)
314 {
315 	percpu_rwsem_assert_held(&cpu_hotplug_lock);
316 }
317 
318 static void lockdep_acquire_cpus_lock(void)
319 {
320 	rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
321 }
322 
323 static void lockdep_release_cpus_lock(void)
324 {
325 	rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
326 }
327 
328 /*
329  * Wait for currently running CPU hotplug operations to complete (if any) and
330  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
331  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
332  * hotplug path before performing hotplug operations. So acquiring that lock
333  * guarantees mutual exclusion from any currently running hotplug operations.
334  */
335 void cpu_hotplug_disable(void)
336 {
337 	cpu_maps_update_begin();
338 	cpu_hotplug_disabled++;
339 	cpu_maps_update_done();
340 }
341 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
342 
343 static void __cpu_hotplug_enable(void)
344 {
345 	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
346 		return;
347 	cpu_hotplug_disabled--;
348 }
349 
350 void cpu_hotplug_enable(void)
351 {
352 	cpu_maps_update_begin();
353 	__cpu_hotplug_enable();
354 	cpu_maps_update_done();
355 }
356 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
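/*
 * Illustrative sketch, not part of this file: cpu_hotplug_disabled is a
 * counter, so disable/enable calls must be strictly balanced. A section that
 * must not race with CPU hotplug (reconfigure_something() is hypothetical)
 * would look like:
 *
 *	cpu_hotplug_disable();
 *	reconfigure_something();
 *	cpu_hotplug_enable();
 */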
357 
358 #else
359 
360 static void lockdep_acquire_cpus_lock(void)
361 {
362 }
363 
364 static void lockdep_release_cpus_lock(void)
365 {
366 }
367 
368 #endif	/* CONFIG_HOTPLUG_CPU */
369 
370 #ifdef CONFIG_HOTPLUG_SMT
371 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
372 EXPORT_SYMBOL_GPL(cpu_smt_control);
373 
374 static bool cpu_smt_available __read_mostly;
375 
376 void __init cpu_smt_disable(bool force)
377 {
378 	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
379 		cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
380 		return;
381 
382 	if (force) {
383 		pr_info("SMT: Force disabled\n");
384 		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
385 	} else {
386 		pr_info("SMT: disabled\n");
387 		cpu_smt_control = CPU_SMT_DISABLED;
388 	}
389 }
390 
391 /*
392  * The decision whether SMT is supported can only be made after the full
393  * CPU identification. Called from architecture code before non-boot CPUs
394  * are brought up.
395  */
396 void __init cpu_smt_check_topology_early(void)
397 {
398 	if (!topology_smt_supported())
399 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
400 }
401 
402 /*
403  * If SMT was disabled by BIOS, detect it here, after the CPUs have been
404  * brought online. This ensures the smt/l1tf sysfs entries are consistent
405  * with reality. cpu_smt_available is set to true during the bringup of
406  * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
407  * cpu_smt_control's previous setting.
408  */
409 void __init cpu_smt_check_topology(void)
410 {
411 	if (!cpu_smt_available)
412 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
413 }
414 
415 static int __init smt_cmdline_disable(char *str)
416 {
417 	cpu_smt_disable(str && !strcmp(str, "force"));
418 	return 0;
419 }
420 early_param("nosmt", smt_cmdline_disable);
421 
422 static inline bool cpu_smt_allowed(unsigned int cpu)
423 {
424 	if (topology_is_primary_thread(cpu))
425 		return true;
426 
427 	/*
428 	 * If the CPU is not a 'primary' thread and the booted_once bit is
429 	 * set then the processor has SMT support. Store this information
430 	 * for the late check of SMT support in cpu_smt_check_topology().
431 	 */
432 	if (per_cpu(cpuhp_state, cpu).booted_once)
433 		cpu_smt_available = true;
434 
435 	if (cpu_smt_control == CPU_SMT_ENABLED)
436 		return true;
437 
438 	/*
439 	 * On x86 it's required to boot all logical CPUs at least once so
440 	 * that the init code can get a chance to set CR4.MCE on each
441 	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
442 	 * core will shut down the machine.
443 	 */
444 	return !per_cpu(cpuhp_state, cpu).booted_once;
445 }
446 #else
447 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
448 #endif
449 
450 static inline enum cpuhp_state
451 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
452 {
453 	enum cpuhp_state prev_state = st->state;
454 
455 	st->rollback = false;
456 	st->last = NULL;
457 
458 	st->target = target;
459 	st->single = false;
460 	st->bringup = st->state < target;
461 
462 	return prev_state;
463 }
464 
465 static inline void
466 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
467 {
468 	st->rollback = true;
469 
470 	/*
471 	 * If we have st->last we need to undo partial multi_instance of this
472 	 * state first. Otherwise start undo at the previous state.
473 	 */
474 	if (!st->last) {
475 		if (st->bringup)
476 			st->state--;
477 		else
478 			st->state++;
479 	}
480 
481 	st->target = prev_state;
482 	st->bringup = !st->bringup;
483 }
484 
485 /* Regular hotplug invocation of the AP hotplug thread */
486 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
487 {
488 	if (!st->single && st->state == st->target)
489 		return;
490 
491 	st->result = 0;
492 	/*
493 	 * Make sure the above stores are visible before should_run becomes
494 	 * true. Paired with the smp_mb() at the top of cpuhp_thread_fun().
495 	 */
496 	smp_mb();
497 	st->should_run = true;
498 	wake_up_process(st->thread);
499 	wait_for_ap_thread(st, st->bringup);
500 }
501 
502 static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
503 {
504 	enum cpuhp_state prev_state;
505 	int ret;
506 
507 	prev_state = cpuhp_set_state(st, target);
508 	__cpuhp_kick_ap(st);
509 	if ((ret = st->result)) {
510 		cpuhp_reset_state(st, prev_state);
511 		__cpuhp_kick_ap(st);
512 	}
513 
514 	return ret;
515 }
516 
517 static int bringup_wait_for_ap(unsigned int cpu)
518 {
519 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
520 
521 	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
522 	wait_for_ap_thread(st, true);
523 	if (WARN_ON_ONCE((!cpu_online(cpu))))
524 		return -ECANCELED;
525 
526 	/* Unpark the stopper thread and the hotplug thread of the target cpu */
527 	stop_machine_unpark(cpu);
528 	kthread_unpark(st->thread);
529 
530 	/*
531 	 * SMT soft disabling on X86 requires bringing the CPU out of the
532 	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
533 	 * CPU marked itself as booted_once in notify_cpu_starting() so the
534 	 * cpu_smt_allowed() check will now return false if this is not the
535 	 * primary sibling.
536 	 */
537 	if (!cpu_smt_allowed(cpu))
538 		return -ECANCELED;
539 
540 	if (st->target <= CPUHP_AP_ONLINE_IDLE)
541 		return 0;
542 
543 	return cpuhp_kick_ap(st, st->target);
544 }
545 
546 static int bringup_cpu(unsigned int cpu)
547 {
548 	struct task_struct *idle = idle_thread_get(cpu);
549 	int ret;
550 
551 	/*
552 	 * Some architectures have to walk the irq descriptors to
553 	 * set up the vector space for the cpu which comes online.
554 	 * Prevent irq alloc/free across the bringup.
555 	 */
556 	irq_lock_sparse();
557 
558 	/* Arch-specific enabling code. */
559 	ret = __cpu_up(cpu, idle);
560 	irq_unlock_sparse();
561 	if (ret)
562 		return ret;
563 	return bringup_wait_for_ap(cpu);
564 }
565 
566 /*
567  * Hotplug state machine related functions
568  */
569 
570 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
571 {
572 	for (st->state--; st->state > st->target; st->state--)
573 		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
574 }
575 
576 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
577 			      enum cpuhp_state target)
578 {
579 	enum cpuhp_state prev_state = st->state;
580 	int ret = 0;
581 
582 	while (st->state < target) {
583 		st->state++;
584 		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
585 		if (ret) {
586 			st->target = prev_state;
587 			undo_cpu_up(cpu, st);
588 			break;
589 		}
590 	}
591 	return ret;
592 }
593 
594 /*
595  * The cpu hotplug threads manage the bringup and teardown of the cpus
596  */
597 static void cpuhp_create(unsigned int cpu)
598 {
599 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
600 
601 	init_completion(&st->done_up);
602 	init_completion(&st->done_down);
603 }
604 
605 static int cpuhp_should_run(unsigned int cpu)
606 {
607 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
608 
609 	return st->should_run;
610 }
611 
612 /*
613  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
614  * callbacks when a state gets [un]installed at runtime.
615  *
616  * Each invocation of this function by the smpboot thread does a single AP
617  * state callback.
618  *
619  * It has 3 modes of operation:
620  *  - single: runs st->cb_state
621  *  - up:     runs ++st->state, while st->state < st->target
622  *  - down:   runs st->state--, while st->state > st->target
623  *
624  * When complete or on error, should_run is cleared and the completion is fired.
625  */
626 static void cpuhp_thread_fun(unsigned int cpu)
627 {
628 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
629 	bool bringup = st->bringup;
630 	enum cpuhp_state state;
631 
632 	if (WARN_ON_ONCE(!st->should_run))
633 		return;
634 
635 	/*
636 	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
637 	 * that if we see ->should_run we also see the rest of the state.
638 	 */
639 	smp_mb();
640 
641 	/*
642 	 * The BP holds the hotplug lock, but we're now running on the AP.
643 	 * Ensure that anybody asserting the lock is held will actually find
644 	 * it so.
645 	 */
646 	lockdep_acquire_cpus_lock();
647 	cpuhp_lock_acquire(bringup);
648 
649 	if (st->single) {
650 		state = st->cb_state;
651 		st->should_run = false;
652 	} else {
653 		if (bringup) {
654 			st->state++;
655 			state = st->state;
656 			st->should_run = (st->state < st->target);
657 			WARN_ON_ONCE(st->state > st->target);
658 		} else {
659 			state = st->state;
660 			st->state--;
661 			st->should_run = (st->state > st->target);
662 			WARN_ON_ONCE(st->state < st->target);
663 		}
664 	}
665 
666 	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
667 
668 	if (cpuhp_is_atomic_state(state)) {
669 		local_irq_disable();
670 		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
671 		local_irq_enable();
672 
673 		/*
674 		 * STARTING/DYING must not fail!
675 		 */
676 		WARN_ON_ONCE(st->result);
677 	} else {
678 		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
679 	}
680 
681 	if (st->result) {
682 		/*
683 		 * If we fail on a rollback, we're up a creek without a
684 		 * paddle, no way forward, no way back. We lose, thanks for
685 		 * playing.
686 		 */
687 		WARN_ON_ONCE(st->rollback);
688 		st->should_run = false;
689 	}
690 
691 	cpuhp_lock_release(bringup);
692 	lockdep_release_cpus_lock();
693 
694 	if (!st->should_run)
695 		complete_ap_thread(st, bringup);
696 }
697 
698 /* Invoke a single callback on a remote cpu */
699 static int
700 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
701 			 struct hlist_node *node)
702 {
703 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
704 	int ret;
705 
706 	if (!cpu_online(cpu))
707 		return 0;
708 
709 	cpuhp_lock_acquire(false);
710 	cpuhp_lock_release(false);
711 
712 	cpuhp_lock_acquire(true);
713 	cpuhp_lock_release(true);
714 
715 	/*
716 	 * If we are up and running, use the hotplug thread. For early calls
717 	 * we invoke the thread function directly.
718 	 */
719 	if (!st->thread)
720 		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
721 
722 	st->rollback = false;
723 	st->last = NULL;
724 
725 	st->node = node;
726 	st->bringup = bringup;
727 	st->cb_state = state;
728 	st->single = true;
729 
730 	__cpuhp_kick_ap(st);
731 
732 	/*
733 	 * If we failed and did a partial invocation, do a rollback.
734 	 */
735 	if ((ret = st->result) && st->last) {
736 		st->rollback = true;
737 		st->bringup = !bringup;
738 
739 		__cpuhp_kick_ap(st);
740 	}
741 
742 	/*
743 	 * Clean up the leftovers so the next hotplug operation won't use stale
744 	 * data.
745 	 */
746 	st->node = st->last = NULL;
747 	return ret;
748 }
749 
750 static int cpuhp_kick_ap_work(unsigned int cpu)
751 {
752 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
753 	enum cpuhp_state prev_state = st->state;
754 	int ret;
755 
756 	cpuhp_lock_acquire(false);
757 	cpuhp_lock_release(false);
758 
759 	cpuhp_lock_acquire(true);
760 	cpuhp_lock_release(true);
761 
762 	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
763 	ret = cpuhp_kick_ap(st, st->target);
764 	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
765 
766 	return ret;
767 }
768 
769 static struct smp_hotplug_thread cpuhp_threads = {
770 	.store			= &cpuhp_state.thread,
771 	.create			= &cpuhp_create,
772 	.thread_should_run	= cpuhp_should_run,
773 	.thread_fn		= cpuhp_thread_fun,
774 	.thread_comm		= "cpuhp/%u",
775 	.selfparking		= true,
776 };
777 
778 void __init cpuhp_threads_init(void)
779 {
780 	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
781 	kthread_unpark(this_cpu_read(cpuhp_state.thread));
782 }
783 
784 #ifdef CONFIG_HOTPLUG_CPU
785 /**
786  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
787  * @cpu: a CPU id
788  *
789  * This function walks all processes, finds a valid mm struct for each one and
790  * then clears a corresponding bit in mm's cpumask.  While this all sounds
791  * trivial, there are various non-obvious corner cases, which this function
792  * tries to solve in a safe manner.
793  *
794  * Also note that the function uses a somewhat relaxed locking scheme, so it may
795  * be called only for an already offlined CPU.
796  */
797 void clear_tasks_mm_cpumask(int cpu)
798 {
799 	struct task_struct *p;
800 
801 	/*
802 	 * This function is called after the cpu is taken down and marked
803 	 * offline, so it's not like new tasks will ever get this cpu set in
804 	 * their mm mask. -- Peter Zijlstra
805 	 * Thus, we may use rcu_read_lock() here, instead of grabbing
806 	 * full-fledged tasklist_lock.
807 	 */
808 	WARN_ON(cpu_online(cpu));
809 	rcu_read_lock();
810 	for_each_process(p) {
811 		struct task_struct *t;
812 
813 		/*
814 		 * Main thread might exit, but other threads may still have
815 		 * a valid mm. Find one.
816 		 */
817 		t = find_lock_task_mm(p);
818 		if (!t)
819 			continue;
820 		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
821 		task_unlock(t);
822 	}
823 	rcu_read_unlock();
824 }
825 
826 /* Take this CPU down. */
827 static int take_cpu_down(void *_param)
828 {
829 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
830 	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
831 	int err, cpu = smp_processor_id();
832 	int ret;
833 
834 	/* Ensure this CPU doesn't handle any more interrupts. */
835 	err = __cpu_disable();
836 	if (err < 0)
837 		return err;
838 
839 	/*
840 	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
841 	 * do this step again.
842 	 */
843 	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
844 	st->state--;
845 	/* Invoke the former CPU_DYING callbacks */
846 	for (; st->state > target; st->state--) {
847 		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
848 		/*
849 		 * DYING must not fail!
850 		 */
851 		WARN_ON_ONCE(ret);
852 	}
853 
854 	/* Give up timekeeping duties */
855 	tick_handover_do_timer();
856 	/* Park the stopper thread */
857 	stop_machine_park(cpu);
858 	return 0;
859 }
860 
861 static int takedown_cpu(unsigned int cpu)
862 {
863 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
864 	int err;
865 
866 	/* Park the smpboot threads */
867 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
868 
869 	/*
870 	 * Prevent irq alloc/free while the dying cpu reorganizes the
871 	 * interrupt affinities.
872 	 */
873 	irq_lock_sparse();
874 
875 	/*
876 	 * So now all preempt/rcu users must observe !cpu_active().
877 	 */
878 	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
879 	if (err) {
880 		/* CPU refused to die */
881 		irq_unlock_sparse();
882 		/* Unpark the hotplug thread so we can rollback there */
883 		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
884 		return err;
885 	}
886 	BUG_ON(cpu_online(cpu));
887 
888 	/*
889 	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
890 	 * all runnable tasks from the CPU, there's only the idle task left now
891 	 * that the migration thread is done doing the stop_machine thing.
892 	 *
893 	 * Wait for the stop thread to go away.
894 	 */
895 	wait_for_ap_thread(st, false);
896 	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
897 
898 	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
899 	irq_unlock_sparse();
900 
901 	hotplug_cpu__broadcast_tick_pull(cpu);
902 	/* This actually kills the CPU. */
903 	__cpu_die(cpu);
904 
905 	tick_cleanup_dead_cpu(cpu);
906 	rcutree_migrate_callbacks(cpu);
907 	return 0;
908 }
909 
910 static void cpuhp_complete_idle_dead(void *arg)
911 {
912 	struct cpuhp_cpu_state *st = arg;
913 
914 	complete_ap_thread(st, false);
915 }
916 
917 void cpuhp_report_idle_dead(void)
918 {
919 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
920 
921 	BUG_ON(st->state != CPUHP_AP_OFFLINE);
922 	rcu_report_dead(smp_processor_id());
923 	st->state = CPUHP_AP_IDLE_DEAD;
924 	/*
925 	 * We cannot call complete after rcu_report_dead() so we delegate it
926 	 * to an online cpu.
927 	 */
928 	smp_call_function_single(cpumask_first(cpu_online_mask),
929 				 cpuhp_complete_idle_dead, st, 0);
930 }
931 
932 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
933 {
934 	for (st->state++; st->state < st->target; st->state++)
935 		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
936 }
937 
938 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
939 				enum cpuhp_state target)
940 {
941 	enum cpuhp_state prev_state = st->state;
942 	int ret = 0;
943 
944 	for (; st->state > target; st->state--) {
945 		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
946 		if (ret) {
947 			st->target = prev_state;
948 			if (st->state < prev_state)
949 				undo_cpu_down(cpu, st);
950 			break;
951 		}
952 	}
953 	return ret;
954 }
955 
956 /* Requires cpu_add_remove_lock to be held */
957 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
958 			   enum cpuhp_state target)
959 {
960 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
961 	int prev_state, ret = 0;
962 
963 	if (num_online_cpus() == 1)
964 		return -EBUSY;
965 
966 	if (!cpu_present(cpu))
967 		return -EINVAL;
968 
969 	cpus_write_lock();
970 
971 	cpuhp_tasks_frozen = tasks_frozen;
972 
973 	prev_state = cpuhp_set_state(st, target);
974 	/*
975 	 * If the current CPU state is in the range of the AP hotplug thread,
976 	 * then we need to kick the thread.
977 	 */
978 	if (st->state > CPUHP_TEARDOWN_CPU) {
979 		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
980 		ret = cpuhp_kick_ap_work(cpu);
981 		/*
982 		 * The AP side has done the error rollback already. Just
983 		 * return the error code.
984 		 */
985 		if (ret)
986 			goto out;
987 
988 		/*
989 		 * We might have stopped while still in the range of the AP hotplug
990 		 * thread. Nothing to do anymore.
991 		 */
992 		if (st->state > CPUHP_TEARDOWN_CPU)
993 			goto out;
994 
995 		st->target = target;
996 	}
997 	/*
998 	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
999 	 * to do the further cleanups.
1000 	 */
1001 	ret = cpuhp_down_callbacks(cpu, st, target);
1002 	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1003 		cpuhp_reset_state(st, prev_state);
1004 		__cpuhp_kick_ap(st);
1005 	}
1006 
1007 out:
1008 	cpus_write_unlock();
1009 	/*
1010 	 * Do post unplug cleanup. This is still protected against
1011 	 * concurrent CPU hotplug via cpu_add_remove_lock.
1012 	 */
1013 	lockup_detector_cleanup();
1014 	return ret;
1015 }
1016 
1017 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1018 {
1019 	if (cpu_hotplug_disabled)
1020 		return -EBUSY;
1021 	return _cpu_down(cpu, 0, target);
1022 }
1023 
1024 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1025 {
1026 	int err;
1027 
1028 	cpu_maps_update_begin();
1029 	err = cpu_down_maps_locked(cpu, target);
1030 	cpu_maps_update_done();
1031 	return err;
1032 }
1033 
1034 int cpu_down(unsigned int cpu)
1035 {
1036 	return do_cpu_down(cpu, CPUHP_OFFLINE);
1037 }
1038 EXPORT_SYMBOL(cpu_down);
1039 
1040 #else
1041 #define takedown_cpu		NULL
1042 #endif /*CONFIG_HOTPLUG_CPU*/
1043 
1044 /**
1045  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1046  * @cpu: cpu that just started
1047  *
1048  * It must be called by the arch code on the new cpu, before the new cpu
1049  * enables interrupts and before the "boot" cpu returns from __cpu_up().
1050  */
1051 void notify_cpu_starting(unsigned int cpu)
1052 {
1053 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1054 	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1055 	int ret;
1056 
1057 	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
1058 	st->booted_once = true;
1059 	while (st->state < target) {
1060 		st->state++;
1061 		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1062 		/*
1063 		 * STARTING must not fail!
1064 		 */
1065 		WARN_ON_ONCE(ret);
1066 	}
1067 }
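/*
 * Illustrative sketch, not part of this file, of where the call above sits in
 * a typical architecture's secondary startup path. secondary_start_kernel()
 * stands in for the arch specific entry point; the other helpers are the
 * generic ones used by existing architectures.
 *
 *	void secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		(arch specific MMU, per-cpu and timer setup goes here)
 *
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */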
1068 
1069 /*
1070  * Called from the idle task. Wake up the controlling task which brings the
1071  * stopper and the hotplug thread of the upcoming CPU up and then delegates
1072  * the rest of the online bringup to the hotplug thread.
1073  */
1074 void cpuhp_online_idle(enum cpuhp_state state)
1075 {
1076 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1077 
1078 	/* Happens for the boot cpu */
1079 	if (state != CPUHP_AP_ONLINE_IDLE)
1080 		return;
1081 
1082 	st->state = CPUHP_AP_ONLINE_IDLE;
1083 	complete_ap_thread(st, true);
1084 }
1085 
1086 /* Requires cpu_add_remove_lock to be held */
1087 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1088 {
1089 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1090 	struct task_struct *idle;
1091 	int ret = 0;
1092 
1093 	cpus_write_lock();
1094 
1095 	if (!cpu_present(cpu)) {
1096 		ret = -EINVAL;
1097 		goto out;
1098 	}
1099 
1100 	/*
1101 	 * The caller of do_cpu_up might have raced with another
1102 	 * caller. Ignore it for now.
1103 	 */
1104 	if (st->state >= target)
1105 		goto out;
1106 
1107 	if (st->state == CPUHP_OFFLINE) {
1108 		/* Let it fail before we try to bring the cpu up */
1109 		idle = idle_thread_get(cpu);
1110 		if (IS_ERR(idle)) {
1111 			ret = PTR_ERR(idle);
1112 			goto out;
1113 		}
1114 	}
1115 
1116 	cpuhp_tasks_frozen = tasks_frozen;
1117 
1118 	cpuhp_set_state(st, target);
1119 	/*
1120 	 * If the current CPU state is in the range of the AP hotplug thread,
1121 	 * then we need to kick the thread once more.
1122 	 */
1123 	if (st->state > CPUHP_BRINGUP_CPU) {
1124 		ret = cpuhp_kick_ap_work(cpu);
1125 		/*
1126 		 * The AP side has done the error rollback already. Just
1127 		 * return the error code.
1128 		 */
1129 		if (ret)
1130 			goto out;
1131 	}
1132 
1133 	/*
1134 	 * Try to reach the target state. We max out on the BP at
1135 	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1136 	 * responsible for bringing it up to the target state.
1137 	 */
1138 	target = min((int)target, CPUHP_BRINGUP_CPU);
1139 	ret = cpuhp_up_callbacks(cpu, st, target);
1140 out:
1141 	cpus_write_unlock();
1142 	return ret;
1143 }
1144 
1145 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1146 {
1147 	int err = 0;
1148 
1149 	if (!cpu_possible(cpu)) {
1150 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1151 		       cpu);
1152 #if defined(CONFIG_IA64)
1153 		pr_err("please check additional_cpus= boot parameter\n");
1154 #endif
1155 		return -EINVAL;
1156 	}
1157 
1158 	err = try_online_node(cpu_to_node(cpu));
1159 	if (err)
1160 		return err;
1161 
1162 	cpu_maps_update_begin();
1163 
1164 	if (cpu_hotplug_disabled) {
1165 		err = -EBUSY;
1166 		goto out;
1167 	}
1168 	if (!cpu_smt_allowed(cpu)) {
1169 		err = -EPERM;
1170 		goto out;
1171 	}
1172 
1173 	err = _cpu_up(cpu, 0, target);
1174 out:
1175 	cpu_maps_update_done();
1176 	return err;
1177 }
1178 
1179 int cpu_up(unsigned int cpu)
1180 {
1181 	return do_cpu_up(cpu, CPUHP_ONLINE);
1182 }
1183 EXPORT_SYMBOL_GPL(cpu_up);
1184 
1185 #ifdef CONFIG_PM_SLEEP_SMP
1186 static cpumask_var_t frozen_cpus;
1187 
1188 int freeze_secondary_cpus(int primary)
1189 {
1190 	int cpu, error = 0;
1191 
1192 	cpu_maps_update_begin();
1193 	if (!cpu_online(primary))
1194 		primary = cpumask_first(cpu_online_mask);
1195 	/*
1196 	 * We take down all of the non-boot CPUs in one shot to avoid races
1197 	 * with userspace trying to use CPU hotplug at the same time.
1198 	 */
1199 	cpumask_clear(frozen_cpus);
1200 
1201 	pr_info("Disabling non-boot CPUs ...\n");
1202 	for_each_online_cpu(cpu) {
1203 		if (cpu == primary)
1204 			continue;
1205 		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1206 		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1207 		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1208 		if (!error)
1209 			cpumask_set_cpu(cpu, frozen_cpus);
1210 		else {
1211 			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1212 			break;
1213 		}
1214 	}
1215 
1216 	if (!error)
1217 		BUG_ON(num_online_cpus() > 1);
1218 	else
1219 		pr_err("Non-boot CPUs are not disabled\n");
1220 
1221 	/*
1222 	 * Make sure the CPUs won't be enabled by someone else. We need to do
1223 	 * this even in case of failure as all disable_nonboot_cpus() users are
1224 	 * supposed to do enable_nonboot_cpus() on the failure path.
1225 	 */
1226 	cpu_hotplug_disabled++;
1227 
1228 	cpu_maps_update_done();
1229 	return error;
1230 }
1231 
1232 void __weak arch_enable_nonboot_cpus_begin(void)
1233 {
1234 }
1235 
1236 void __weak arch_enable_nonboot_cpus_end(void)
1237 {
1238 }
1239 
1240 void enable_nonboot_cpus(void)
1241 {
1242 	int cpu, error;
1243 
1244 	/* Allow everyone to use the CPU hotplug again */
1245 	cpu_maps_update_begin();
1246 	__cpu_hotplug_enable();
1247 	if (cpumask_empty(frozen_cpus))
1248 		goto out;
1249 
1250 	pr_info("Enabling non-boot CPUs ...\n");
1251 
1252 	arch_enable_nonboot_cpus_begin();
1253 
1254 	for_each_cpu(cpu, frozen_cpus) {
1255 		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1256 		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1257 		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1258 		if (!error) {
1259 			pr_info("CPU%d is up\n", cpu);
1260 			continue;
1261 		}
1262 		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1263 	}
1264 
1265 	arch_enable_nonboot_cpus_end();
1266 
1267 	cpumask_clear(frozen_cpus);
1268 out:
1269 	cpu_maps_update_done();
1270 }
1271 
1272 static int __init alloc_frozen_cpus(void)
1273 {
1274 	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1275 		return -ENOMEM;
1276 	return 0;
1277 }
1278 core_initcall(alloc_frozen_cpus);
1279 
1280 /*
1281  * When callbacks for CPU hotplug notifications are being executed, we must
1282  * ensure that the state of the system with respect to the tasks being frozen
1283  * or not, as reported by the notification, remains unchanged *throughout the
1284  * duration* of the execution of the callbacks.
1285  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1286  *
1287  * This synchronization is implemented by mutually excluding regular CPU
1288  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1289  * Hibernate notifications.
1290  */
1291 static int
1292 cpu_hotplug_pm_callback(struct notifier_block *nb,
1293 			unsigned long action, void *ptr)
1294 {
1295 	switch (action) {
1296 
1297 	case PM_SUSPEND_PREPARE:
1298 	case PM_HIBERNATION_PREPARE:
1299 		cpu_hotplug_disable();
1300 		break;
1301 
1302 	case PM_POST_SUSPEND:
1303 	case PM_POST_HIBERNATION:
1304 		cpu_hotplug_enable();
1305 		break;
1306 
1307 	default:
1308 		return NOTIFY_DONE;
1309 	}
1310 
1311 	return NOTIFY_OK;
1312 }
1313 
1314 
1315 static int __init cpu_hotplug_pm_sync_init(void)
1316 {
1317 	/*
1318 	 * cpu_hotplug_pm_callback has higher priority than x86
1319 	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
1320 	 * disabling cpu hotplug to avoid a cpu hotplug race.
1321 	 */
1322 	pm_notifier(cpu_hotplug_pm_callback, 0);
1323 	return 0;
1324 }
1325 core_initcall(cpu_hotplug_pm_sync_init);
1326 
1327 #endif /* CONFIG_PM_SLEEP_SMP */
1328 
1329 int __boot_cpu_id;
1330 
1331 #endif /* CONFIG_SMP */
1332 
1333 /* Boot processor state steps */
1334 static struct cpuhp_step cpuhp_hp_states[] = {
1335 	[CPUHP_OFFLINE] = {
1336 		.name			= "offline",
1337 		.startup.single		= NULL,
1338 		.teardown.single	= NULL,
1339 	},
1340 #ifdef CONFIG_SMP
1341 	[CPUHP_CREATE_THREADS] = {
1342 		.name			= "threads:prepare",
1343 		.startup.single		= smpboot_create_threads,
1344 		.teardown.single	= NULL,
1345 		.cant_stop		= true,
1346 	},
1347 	[CPUHP_PERF_PREPARE] = {
1348 		.name			= "perf:prepare",
1349 		.startup.single		= perf_event_init_cpu,
1350 		.teardown.single	= perf_event_exit_cpu,
1351 	},
1352 	[CPUHP_WORKQUEUE_PREP] = {
1353 		.name			= "workqueue:prepare",
1354 		.startup.single		= workqueue_prepare_cpu,
1355 		.teardown.single	= NULL,
1356 	},
1357 	[CPUHP_HRTIMERS_PREPARE] = {
1358 		.name			= "hrtimers:prepare",
1359 		.startup.single		= hrtimers_prepare_cpu,
1360 		.teardown.single	= hrtimers_dead_cpu,
1361 	},
1362 	[CPUHP_SMPCFD_PREPARE] = {
1363 		.name			= "smpcfd:prepare",
1364 		.startup.single		= smpcfd_prepare_cpu,
1365 		.teardown.single	= smpcfd_dead_cpu,
1366 	},
1367 	[CPUHP_RELAY_PREPARE] = {
1368 		.name			= "relay:prepare",
1369 		.startup.single		= relay_prepare_cpu,
1370 		.teardown.single	= NULL,
1371 	},
1372 	[CPUHP_SLAB_PREPARE] = {
1373 		.name			= "slab:prepare",
1374 		.startup.single		= slab_prepare_cpu,
1375 		.teardown.single	= slab_dead_cpu,
1376 	},
1377 	[CPUHP_RCUTREE_PREP] = {
1378 		.name			= "RCU/tree:prepare",
1379 		.startup.single		= rcutree_prepare_cpu,
1380 		.teardown.single	= rcutree_dead_cpu,
1381 	},
1382 	/*
1383 	 * On the tear-down path, timers_dead_cpu() must be invoked
1384 	 * before blk_mq_queue_reinit_notify() from notify_dead(),
1385 	 * otherwise an RCU stall occurs.
1386 	 */
1387 	[CPUHP_TIMERS_PREPARE] = {
1388 		.name			= "timers:prepare",
1389 		.startup.single		= timers_prepare_cpu,
1390 		.teardown.single	= timers_dead_cpu,
1391 	},
1392 	/* Kicks the plugged cpu into life */
1393 	[CPUHP_BRINGUP_CPU] = {
1394 		.name			= "cpu:bringup",
1395 		.startup.single		= bringup_cpu,
1396 		.teardown.single	= NULL,
1397 		.cant_stop		= true,
1398 	},
1399 	/* Final state before CPU kills itself */
1400 	[CPUHP_AP_IDLE_DEAD] = {
1401 		.name			= "idle:dead",
1402 	},
1403 	/*
1404 	 * Last state before CPU enters the idle loop to die. Transient state
1405 	 * for synchronization.
1406 	 */
1407 	[CPUHP_AP_OFFLINE] = {
1408 		.name			= "ap:offline",
1409 		.cant_stop		= true,
1410 	},
1411 	/* First state is scheduler control. Interrupts are disabled */
1412 	[CPUHP_AP_SCHED_STARTING] = {
1413 		.name			= "sched:starting",
1414 		.startup.single		= sched_cpu_starting,
1415 		.teardown.single	= sched_cpu_dying,
1416 	},
1417 	[CPUHP_AP_RCUTREE_DYING] = {
1418 		.name			= "RCU/tree:dying",
1419 		.startup.single		= NULL,
1420 		.teardown.single	= rcutree_dying_cpu,
1421 	},
1422 	[CPUHP_AP_SMPCFD_DYING] = {
1423 		.name			= "smpcfd:dying",
1424 		.startup.single		= NULL,
1425 		.teardown.single	= smpcfd_dying_cpu,
1426 	},
1427 	/* Entry state on starting. Interrupts enabled from here on. Transient
1428 	 * state for synchronization. */
1429 	[CPUHP_AP_ONLINE] = {
1430 		.name			= "ap:online",
1431 	},
1432 	/*
1433 	 * Handled on the control processor until the plugged processor manages
1434 	 * this itself.
1435 	 */
1436 	[CPUHP_TEARDOWN_CPU] = {
1437 		.name			= "cpu:teardown",
1438 		.startup.single		= NULL,
1439 		.teardown.single	= takedown_cpu,
1440 		.cant_stop		= true,
1441 	},
1442 	/* Handle smpboot threads park/unpark */
1443 	[CPUHP_AP_SMPBOOT_THREADS] = {
1444 		.name			= "smpboot/threads:online",
1445 		.startup.single		= smpboot_unpark_threads,
1446 		.teardown.single	= smpboot_park_threads,
1447 	},
1448 	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1449 		.name			= "irq/affinity:online",
1450 		.startup.single		= irq_affinity_online_cpu,
1451 		.teardown.single	= NULL,
1452 	},
1453 	[CPUHP_AP_PERF_ONLINE] = {
1454 		.name			= "perf:online",
1455 		.startup.single		= perf_event_init_cpu,
1456 		.teardown.single	= perf_event_exit_cpu,
1457 	},
1458 	[CPUHP_AP_WATCHDOG_ONLINE] = {
1459 		.name			= "lockup_detector:online",
1460 		.startup.single		= lockup_detector_online_cpu,
1461 		.teardown.single	= lockup_detector_offline_cpu,
1462 	},
1463 	[CPUHP_AP_WORKQUEUE_ONLINE] = {
1464 		.name			= "workqueue:online",
1465 		.startup.single		= workqueue_online_cpu,
1466 		.teardown.single	= workqueue_offline_cpu,
1467 	},
1468 	[CPUHP_AP_RCUTREE_ONLINE] = {
1469 		.name			= "RCU/tree:online",
1470 		.startup.single		= rcutree_online_cpu,
1471 		.teardown.single	= rcutree_offline_cpu,
1472 	},
1473 #endif
1474 	/*
1475 	 * The dynamically registered state space is here
1476 	 */
1477 
1478 #ifdef CONFIG_SMP
1479 	/* Last state is scheduler control setting the cpu active */
1480 	[CPUHP_AP_ACTIVE] = {
1481 		.name			= "sched:active",
1482 		.startup.single		= sched_cpu_activate,
1483 		.teardown.single	= sched_cpu_deactivate,
1484 	},
1485 #endif
1486 
1487 	/* CPU is fully up and running. */
1488 	[CPUHP_ONLINE] = {
1489 		.name			= "online",
1490 		.startup.single		= NULL,
1491 		.teardown.single	= NULL,
1492 	},
1493 };
1494 
1495 /* Sanity check for callbacks */
1496 static int cpuhp_cb_check(enum cpuhp_state state)
1497 {
1498 	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1499 		return -EINVAL;
1500 	return 0;
1501 }
1502 
1503 /*
1504  * Returns a free slot for dynamic state assignment in the given range. The
1505  * states are protected by cpuhp_state_mutex and an empty slot is identified
1506  * by having no name assigned.
1507  */
1508 static int cpuhp_reserve_state(enum cpuhp_state state)
1509 {
1510 	enum cpuhp_state i, end;
1511 	struct cpuhp_step *step;
1512 
1513 	switch (state) {
1514 	case CPUHP_AP_ONLINE_DYN:
1515 		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1516 		end = CPUHP_AP_ONLINE_DYN_END;
1517 		break;
1518 	case CPUHP_BP_PREPARE_DYN:
1519 		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1520 		end = CPUHP_BP_PREPARE_DYN_END;
1521 		break;
1522 	default:
1523 		return -EINVAL;
1524 	}
1525 
1526 	for (i = state; i <= end; i++, step++) {
1527 		if (!step->name)
1528 			return i;
1529 	}
1530 	WARN(1, "No more dynamic states available for CPU hotplug\n");
1531 	return -ENOSPC;
1532 }
1533 
1534 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1535 				 int (*startup)(unsigned int cpu),
1536 				 int (*teardown)(unsigned int cpu),
1537 				 bool multi_instance)
1538 {
1539 	/* (Un)Install the callbacks for further cpu hotplug operations */
1540 	struct cpuhp_step *sp;
1541 	int ret = 0;
1542 
1543 	/*
1544 	 * If name is NULL, then the state gets removed.
1545 	 *
1546 	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1547 	 * the first allocation from these dynamic ranges, so the removal
1548 	 * would trigger a new allocation and clear the wrong (already
1549 	 * empty) state, leaving the callbacks of the to be cleared state
1550 	 * dangling, which causes wreckage on the next hotplug operation.
1551 	 */
1552 	if (name && (state == CPUHP_AP_ONLINE_DYN ||
1553 		     state == CPUHP_BP_PREPARE_DYN)) {
1554 		ret = cpuhp_reserve_state(state);
1555 		if (ret < 0)
1556 			return ret;
1557 		state = ret;
1558 	}
1559 	sp = cpuhp_get_step(state);
1560 	if (name && sp->name)
1561 		return -EBUSY;
1562 
1563 	sp->startup.single = startup;
1564 	sp->teardown.single = teardown;
1565 	sp->name = name;
1566 	sp->multi_instance = multi_instance;
1567 	INIT_HLIST_HEAD(&sp->list);
1568 	return ret;
1569 }
1570 
1571 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1572 {
1573 	return cpuhp_get_step(state)->teardown.single;
1574 }
1575 
1576 /*
1577  * Call the startup/teardown function for a step either on the AP or
1578  * on the current CPU.
1579  */
1580 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1581 			    struct hlist_node *node)
1582 {
1583 	struct cpuhp_step *sp = cpuhp_get_step(state);
1584 	int ret;
1585 
1586 	/*
1587 	 * If there's nothing to do, we are done.
1588 	 * Relies on the union for multi_instance.
1589 	 */
1590 	if ((bringup && !sp->startup.single) ||
1591 	    (!bringup && !sp->teardown.single))
1592 		return 0;
1593 	/*
1594 	 * The non-AP-bound callbacks can fail on bringup. On teardown,
1595 	 * e.g. module removal, we crash for now.
1596 	 */
1597 #ifdef CONFIG_SMP
1598 	if (cpuhp_is_ap_state(state))
1599 		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1600 	else
1601 		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1602 #else
1603 	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1604 #endif
1605 	BUG_ON(ret && !bringup);
1606 	return ret;
1607 }
1608 
1609 /*
1610  * Called from __cpuhp_setup_state on a recoverable failure.
1611  *
1612  * Note: The teardown callbacks for rollback are not allowed to fail!
1613  */
1614 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1615 				   struct hlist_node *node)
1616 {
1617 	int cpu;
1618 
1619 	/* Roll back the already executed steps on the other cpus */
1620 	for_each_present_cpu(cpu) {
1621 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1622 		int cpustate = st->state;
1623 
1624 		if (cpu >= failedcpu)
1625 			break;
1626 
1627 		/* Did we invoke the startup call on that cpu ? */
1628 		if (cpustate >= state)
1629 			cpuhp_issue_call(cpu, state, false, node);
1630 	}
1631 }
1632 
1633 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1634 					  struct hlist_node *node,
1635 					  bool invoke)
1636 {
1637 	struct cpuhp_step *sp;
1638 	int cpu;
1639 	int ret;
1640 
1641 	lockdep_assert_cpus_held();
1642 
1643 	sp = cpuhp_get_step(state);
1644 	if (sp->multi_instance == false)
1645 		return -EINVAL;
1646 
1647 	mutex_lock(&cpuhp_state_mutex);
1648 
1649 	if (!invoke || !sp->startup.multi)
1650 		goto add_node;
1651 
1652 	/*
1653 	 * Try to call the startup callback for each present cpu
1654 	 * depending on the hotplug state of the cpu.
1655 	 */
1656 	for_each_present_cpu(cpu) {
1657 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1658 		int cpustate = st->state;
1659 
1660 		if (cpustate < state)
1661 			continue;
1662 
1663 		ret = cpuhp_issue_call(cpu, state, true, node);
1664 		if (ret) {
1665 			if (sp->teardown.multi)
1666 				cpuhp_rollback_install(cpu, state, node);
1667 			goto unlock;
1668 		}
1669 	}
1670 add_node:
1671 	ret = 0;
1672 	hlist_add_head(node, &sp->list);
1673 unlock:
1674 	mutex_unlock(&cpuhp_state_mutex);
1675 	return ret;
1676 }
1677 
1678 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1679 			       bool invoke)
1680 {
1681 	int ret;
1682 
1683 	cpus_read_lock();
1684 	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1685 	cpus_read_unlock();
1686 	return ret;
1687 }
1688 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
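/*
 * Illustrative sketch, not part of this file: multi-instance states embed a
 * struct hlist_node in each instance. struct my_dev, my_online(), my_offline()
 * and my_hp_state below are hypothetical.
 *
 *	struct my_dev {
 *		struct hlist_node node;
 *		void __iomem *base;
 *	};
 *
 *	static int my_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_dev *dev = hlist_entry(node, struct my_dev, node);
 *
 *		(set up dev's per-cpu resources here)
 *		return 0;
 *	}
 *
 *	Once, at driver init:
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *				      my_online, my_offline);
 *	if (ret < 0)
 *		return ret;
 *	my_hp_state = ret;
 *
 *	Then for each device instance:
 *
 *	ret = cpuhp_state_add_instance(my_hp_state, &dev->node);
 */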
1689 
1690 /**
1691  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
1692  * @state:		The state to setup
1693  * @invoke:		If true, the startup function is invoked for cpus where
1694  *			cpu state >= @state
1695  * @startup:		startup callback function
1696  * @teardown:		teardown callback function
1697  * @multi_instance:	State is set up for multiple instances which get
1698  *			added afterwards.
1699  *
1700  * The caller needs to hold cpus read locked while calling this function.
1701  * Returns:
1702  *   On success:
1703  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
1704  *      0 for all other states
1705  *   On failure: proper (negative) error code
1706  */
1707 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1708 				   const char *name, bool invoke,
1709 				   int (*startup)(unsigned int cpu),
1710 				   int (*teardown)(unsigned int cpu),
1711 				   bool multi_instance)
1712 {
1713 	int cpu, ret = 0;
1714 	bool dynstate;
1715 
1716 	lockdep_assert_cpus_held();
1717 
1718 	if (cpuhp_cb_check(state) || !name)
1719 		return -EINVAL;
1720 
1721 	mutex_lock(&cpuhp_state_mutex);
1722 
1723 	ret = cpuhp_store_callbacks(state, name, startup, teardown,
1724 				    multi_instance);
1725 
1726 	dynstate = state == CPUHP_AP_ONLINE_DYN;
1727 	if (ret > 0 && dynstate) {
1728 		state = ret;
1729 		ret = 0;
1730 	}
1731 
1732 	if (ret || !invoke || !startup)
1733 		goto out;
1734 
1735 	/*
1736 	 * Try to call the startup callback for each present cpu
1737 	 * depending on the hotplug state of the cpu.
1738 	 */
1739 	for_each_present_cpu(cpu) {
1740 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1741 		int cpustate = st->state;
1742 
1743 		if (cpustate < state)
1744 			continue;
1745 
1746 		ret = cpuhp_issue_call(cpu, state, true, NULL);
1747 		if (ret) {
1748 			if (teardown)
1749 				cpuhp_rollback_install(cpu, state, NULL);
1750 			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1751 			goto out;
1752 		}
1753 	}
1754 out:
1755 	mutex_unlock(&cpuhp_state_mutex);
1756 	/*
1757 	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1758 	 * dynamically allocated state in case of success.
1759 	 */
1760 	if (!ret && dynstate)
1761 		return state;
1762 	return ret;
1763 }
1764 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1765 
1766 int __cpuhp_setup_state(enum cpuhp_state state,
1767 			const char *name, bool invoke,
1768 			int (*startup)(unsigned int cpu),
1769 			int (*teardown)(unsigned int cpu),
1770 			bool multi_instance)
1771 {
1772 	int ret;
1773 
1774 	cpus_read_lock();
1775 	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1776 					     teardown, multi_instance);
1777 	cpus_read_unlock();
1778 	return ret;
1779 }
1780 EXPORT_SYMBOL(__cpuhp_setup_state);
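/*
 * Illustrative sketch, not part of this file: most users go through the
 * cpuhp_setup_state() wrapper, which takes the cpus read lock itself. The
 * callbacks and my_hp_state below are hypothetical.
 *
 *	static int mydrv_online(unsigned int cpu)
 *	{
 *		(enable this driver's per-cpu resources)
 *		return 0;
 *	}
 *
 *	static int mydrv_offline(unsigned int cpu)
 *	{
 *		(tear them down again)
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *				mydrv_online, mydrv_offline);
 *	if (ret < 0)
 *		return ret;
 *	my_hp_state = ret;	(dynamically allocated state number)
 */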
1781 
1782 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1783 				  struct hlist_node *node, bool invoke)
1784 {
1785 	struct cpuhp_step *sp = cpuhp_get_step(state);
1786 	int cpu;
1787 
1788 	BUG_ON(cpuhp_cb_check(state));
1789 
1790 	if (!sp->multi_instance)
1791 		return -EINVAL;
1792 
1793 	cpus_read_lock();
1794 	mutex_lock(&cpuhp_state_mutex);
1795 
1796 	if (!invoke || !cpuhp_get_teardown_cb(state))
1797 		goto remove;
1798 	/*
1799 	 * Call the teardown callback for each present cpu depending
1800 	 * on the hotplug state of the cpu. This function is not
1801 	 * allowed to fail currently!
1802 	 */
1803 	for_each_present_cpu(cpu) {
1804 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1805 		int cpustate = st->state;
1806 
1807 		if (cpustate >= state)
1808 			cpuhp_issue_call(cpu, state, false, node);
1809 	}
1810 
1811 remove:
1812 	hlist_del(node);
1813 	mutex_unlock(&cpuhp_state_mutex);
1814 	cpus_read_unlock();
1815 
1816 	return 0;
1817 }
1818 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1819 
1820 /**
1821  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
1822  * @state:	The state to remove
1823  * @invoke:	If true, the teardown function is invoked for cpus where
1824  *		cpu state >= @state
1825  *
1826  * The caller needs to hold cpus read locked while calling this function.
1827  * The teardown callback is currently not allowed to fail. Think
1828  * about module removal!
1829  */
1830 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
1831 {
1832 	struct cpuhp_step *sp = cpuhp_get_step(state);
1833 	int cpu;
1834 
1835 	BUG_ON(cpuhp_cb_check(state));
1836 
1837 	lockdep_assert_cpus_held();
1838 
1839 	mutex_lock(&cpuhp_state_mutex);
1840 	if (sp->multi_instance) {
1841 		WARN(!hlist_empty(&sp->list),
1842 		     "Error: Removing state %d which has instances left.\n",
1843 		     state);
1844 		goto remove;
1845 	}
1846 
1847 	if (!invoke || !cpuhp_get_teardown_cb(state))
1848 		goto remove;
1849 
1850 	/*
1851 	 * Call the teardown callback for each present cpu depending
1852 	 * on the hotplug state of the cpu. This function is not
1853 	 * allowed to fail currently!
1854 	 */
1855 	for_each_present_cpu(cpu) {
1856 		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1857 		int cpustate = st->state;
1858 
1859 		if (cpustate >= state)
1860 			cpuhp_issue_call(cpu, state, false, NULL);
1861 	}
1862 remove:
1863 	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1864 	mutex_unlock(&cpuhp_state_mutex);
1865 }
1866 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1867 
1868 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1869 {
1870 	cpus_read_lock();
1871 	__cpuhp_remove_state_cpuslocked(state, invoke);
1872 	cpus_read_unlock();
1873 }
1874 EXPORT_SYMBOL(__cpuhp_remove_state);
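/*
 * Illustrative sketch, not part of this file: the counterpart to the setup
 * example above, typically called from module exit. my_hp_state is
 * hypothetical.
 *
 *	cpuhp_remove_state(my_hp_state);
 *
 * or, if the teardown callbacks must not be invoked:
 *
 *	cpuhp_remove_state_nocalls(my_hp_state);
 */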
1875 
1876 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1877 static ssize_t show_cpuhp_state(struct device *dev,
1878 				struct device_attribute *attr, char *buf)
1879 {
1880 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1881 
1882 	return sprintf(buf, "%d\n", st->state);
1883 }
1884 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1885 
1886 static ssize_t write_cpuhp_target(struct device *dev,
1887 				  struct device_attribute *attr,
1888 				  const char *buf, size_t count)
1889 {
1890 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1891 	struct cpuhp_step *sp;
1892 	int target, ret;
1893 
1894 	ret = kstrtoint(buf, 10, &target);
1895 	if (ret)
1896 		return ret;
1897 
1898 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1899 	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1900 		return -EINVAL;
1901 #else
1902 	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1903 		return -EINVAL;
1904 #endif
1905 
1906 	ret = lock_device_hotplug_sysfs();
1907 	if (ret)
1908 		return ret;
1909 
1910 	mutex_lock(&cpuhp_state_mutex);
1911 	sp = cpuhp_get_step(target);
1912 	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1913 	mutex_unlock(&cpuhp_state_mutex);
1914 	if (ret)
1915 		goto out;
1916 
1917 	if (st->state < target)
1918 		ret = do_cpu_up(dev->id, target);
1919 	else
1920 		ret = do_cpu_down(dev->id, target);
1921 out:
1922 	unlock_device_hotplug();
1923 	return ret ? ret : count;
1924 }
1925 
1926 static ssize_t show_cpuhp_target(struct device *dev,
1927 				 struct device_attribute *attr, char *buf)
1928 {
1929 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1930 
1931 	return sprintf(buf, "%d\n", st->target);
1932 }
1933 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1934 
1935 
1936 static ssize_t write_cpuhp_fail(struct device *dev,
1937 				struct device_attribute *attr,
1938 				const char *buf, size_t count)
1939 {
1940 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1941 	struct cpuhp_step *sp;
1942 	int fail, ret;
1943 
1944 	ret = kstrtoint(buf, 10, &fail);
1945 	if (ret)
1946 		return ret;
1947 
1948 	/*
1949 	 * Cannot fail STARTING/DYING callbacks.
1950 	 */
1951 	if (cpuhp_is_atomic_state(fail))
1952 		return -EINVAL;
1953 
1954 	/*
1955 	 * Cannot fail anything that doesn't have callbacks.
1956 	 */
1957 	mutex_lock(&cpuhp_state_mutex);
1958 	sp = cpuhp_get_step(fail);
1959 	if (!sp->startup.single && !sp->teardown.single)
1960 		ret = -EINVAL;
1961 	mutex_unlock(&cpuhp_state_mutex);
1962 	if (ret)
1963 		return ret;
1964 
1965 	st->fail = fail;
1966 
1967 	return count;
1968 }
1969 
1970 static ssize_t show_cpuhp_fail(struct device *dev,
1971 			       struct device_attribute *attr, char *buf)
1972 {
1973 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1974 
1975 	return sprintf(buf, "%d\n", st->fail);
1976 }
1977 
1978 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
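/*
 * Illustration: "fail" is a fault-injection hook for the state machine.
 * Writing a state number arms st->fail so that the state machine treats that
 * step as failed the next time it is reached on this CPU, exercising the
 * rollback path.  Atomic STARTING/DYING states and states without callbacks
 * are rejected above.  Assuming the same sysfs layout as for the other
 * attributes, a shell sketch:
 *
 *	echo <state-nr> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 */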
1979 
1980 static struct attribute *cpuhp_cpu_attrs[] = {
1981 	&dev_attr_state.attr,
1982 	&dev_attr_target.attr,
1983 	&dev_attr_fail.attr,
1984 	NULL
1985 };
1986 
1987 static const struct attribute_group cpuhp_cpu_attr_group = {
1988 	.attrs = cpuhp_cpu_attrs,
1989 	.name = "hotplug",
1990 	NULL
1991 };
1992 
1993 static ssize_t show_cpuhp_states(struct device *dev,
1994 				 struct device_attribute *attr, char *buf)
1995 {
1996 	ssize_t cur, res = 0;
1997 	int i;
1998 
1999 	mutex_lock(&cpuhp_state_mutex);
2000 	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2001 		struct cpuhp_step *sp = cpuhp_get_step(i);
2002 
2003 		if (sp->name) {
2004 			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2005 			buf += cur;
2006 			res += cur;
2007 		}
2008 	}
2009 	mutex_unlock(&cpuhp_state_mutex);
2010 	return res;
2011 }
2012 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
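/*
 * Illustration: "states" is read-only and lists every named step as a
 * "%3d: name" line.  The exact numbers and set of names depend on the
 * kernel configuration; the output starts and ends roughly like:
 *
 *	  0: offline
 *	  1: threads:prepare
 *	     ...
 *	<N>: online
 */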
2013 
2014 static struct attribute *cpuhp_cpu_root_attrs[] = {
2015 	&dev_attr_states.attr,
2016 	NULL
2017 };
2018 
2019 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2020 	.attrs = cpuhp_cpu_root_attrs,
2021 	.name = "hotplug",
2022 	NULL
2023 };
2024 
2025 #ifdef CONFIG_HOTPLUG_SMT
2026 
2027 static const char *smt_states[] = {
2028 	[CPU_SMT_ENABLED]		= "on",
2029 	[CPU_SMT_DISABLED]		= "off",
2030 	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
2031 	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
2032 };
2033 
2034 static ssize_t
2035 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2036 {
2037 	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
2038 }
2039 
2040 static void cpuhp_offline_cpu_device(unsigned int cpu)
2041 {
2042 	struct device *dev = get_cpu_device(cpu);
2043 
2044 	dev->offline = true;
2045 	/* Tell user space about the state change */
2046 	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2047 }
2048 
2049 static void cpuhp_online_cpu_device(unsigned int cpu)
2050 {
2051 	struct device *dev = get_cpu_device(cpu);
2052 
2053 	dev->offline = false;
2054 	/* Tell user space about the state change */
2055 	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2056 }
2057 
2058 /*
2059  * Architectures that need SMT-specific errata handling during SMT hotplug
2060  * should override this.
2061  */
2062 void __weak arch_smt_update(void) { }
2063 
2064 static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2065 {
2066 	int cpu, ret = 0;
2067 
2068 	cpu_maps_update_begin();
2069 	for_each_online_cpu(cpu) {
2070 		if (topology_is_primary_thread(cpu))
2071 			continue;
2072 		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2073 		if (ret)
2074 			break;
2075 		/*
2076 		 * As this needs to hold the cpu maps lock, it's impossible
2077 		 * to call device_offline() here because that ends up calling
2078 		 * cpu_down(), which takes the cpu maps lock as well. That lock
2079 		 * needs to be held because this might race against in-kernel
2080 		 * abusers of the hotplug machinery (e.g. thermal management).
2081 		 *
2082 		 * So nothing would update the device's offline state. That
2083 		 * would leave the sysfs entry stale and prevent onlining after
2084 		 * smt control has been changed to 'off' again. This path is
2085 		 * called under the sysfs hotplug lock, so it is properly
2086 		 * serialized against the regular offline usage.
2087 		 */
2088 		cpuhp_offline_cpu_device(cpu);
2089 	}
2090 	if (!ret) {
2091 		cpu_smt_control = ctrlval;
2092 		arch_smt_update();
2093 	}
2094 	cpu_maps_update_done();
2095 	return ret;
2096 }
2097 
2098 static int cpuhp_smt_enable(void)
2099 {
2100 	int cpu, ret = 0;
2101 
2102 	cpu_maps_update_begin();
2103 	cpu_smt_control = CPU_SMT_ENABLED;
2104 	arch_smt_update();
2105 	for_each_present_cpu(cpu) {
2106 		/* Skip online CPUs and CPUs on offline nodes */
2107 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2108 			continue;
2109 		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2110 		if (ret)
2111 			break;
2112 		/* See comment in cpuhp_smt_disable() */
2113 		cpuhp_online_cpu_device(cpu);
2114 	}
2115 	cpu_maps_update_done();
2116 	return ret;
2117 }
2118 
2119 static ssize_t
2120 store_smt_control(struct device *dev, struct device_attribute *attr,
2121 		  const char *buf, size_t count)
2122 {
2123 	int ctrlval, ret;
2124 
2125 	if (sysfs_streq(buf, "on"))
2126 		ctrlval = CPU_SMT_ENABLED;
2127 	else if (sysfs_streq(buf, "off"))
2128 		ctrlval = CPU_SMT_DISABLED;
2129 	else if (sysfs_streq(buf, "forceoff"))
2130 		ctrlval = CPU_SMT_FORCE_DISABLED;
2131 	else
2132 		return -EINVAL;
2133 
2134 	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2135 		return -EPERM;
2136 
2137 	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2138 		return -ENODEV;
2139 
2140 	ret = lock_device_hotplug_sysfs();
2141 	if (ret)
2142 		return ret;
2143 
2144 	if (ctrlval != cpu_smt_control) {
2145 		switch (ctrlval) {
2146 		case CPU_SMT_ENABLED:
2147 			ret = cpuhp_smt_enable();
2148 			break;
2149 		case CPU_SMT_DISABLED:
2150 		case CPU_SMT_FORCE_DISABLED:
2151 			ret = cpuhp_smt_disable(ctrlval);
2152 			break;
2153 		}
2154 	}
2155 
2156 	unlock_device_hotplug();
2157 	return ret ? ret : count;
2158 }
2159 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
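/*
 * Illustration: the SMT attributes are attached to the cpu subsystem root
 * in the "smt" group (see cpu_smt_state_init() below), so with the usual
 * sysfs layout:
 *
 *	cat /sys/devices/system/cpu/smt/control
 *	echo off > /sys/devices/system/cpu/smt/control
 *
 * Writing "off" offlines all non-primary SMT threads via cpuhp_smt_disable();
 * "forceoff" does the same but latches: once forced off, further writes are
 * rejected with -EPERM in store_smt_control() above.
 */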
2160 
2161 static ssize_t
2162 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2163 {
2164 	bool active = topology_max_smt_threads() > 1;
2165 
2166 	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2167 }
2168 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2169 
2170 static struct attribute *cpuhp_smt_attrs[] = {
2171 	&dev_attr_control.attr,
2172 	&dev_attr_active.attr,
2173 	NULL
2174 };
2175 
2176 static const struct attribute_group cpuhp_smt_attr_group = {
2177 	.attrs = cpuhp_smt_attrs,
2178 	.name = "smt",
2179 	NULL
2180 };
2181 
2182 static int __init cpu_smt_state_init(void)
2183 {
2184 	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2185 				  &cpuhp_smt_attr_group);
2186 }
2187 
2188 #else
2189 static inline int cpu_smt_state_init(void) { return 0; }
2190 #endif
2191 
2192 static int __init cpuhp_sysfs_init(void)
2193 {
2194 	int cpu, ret;
2195 
2196 	ret = cpu_smt_state_init();
2197 	if (ret)
2198 		return ret;
2199 
2200 	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2201 				 &cpuhp_cpu_root_attr_group);
2202 	if (ret)
2203 		return ret;
2204 
2205 	for_each_possible_cpu(cpu) {
2206 		struct device *dev = get_cpu_device(cpu);
2207 
2208 		if (!dev)
2209 			continue;
2210 		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2211 		if (ret)
2212 			return ret;
2213 	}
2214 	return 0;
2215 }
2216 device_initcall(cpuhp_sysfs_init);
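/*
 * Illustration: after cpuhp_sysfs_init() the control files created above sit
 * roughly at (assuming the usual sysfs mount):
 *
 *	/sys/devices/system/cpu/hotplug/states
 *	/sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}
 *	/sys/devices/system/cpu/smt/{control,active}     (CONFIG_HOTPLUG_SMT)
 */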
2217 #endif
2218 
2219 /*
2220  * cpu_bit_bitmap[] is a special, "compressed" data structure that
2221  * represents, for each nr in 0..NR_CPUS-1, the NR_CPUS-bit value 1<<nr.
2222  *
2223  * It is used by cpumask_of() to obtain a constant address of a CPU
2224  * mask value that has only a single bit set.
2225  */
2226 
2227 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2228 #define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
2229 #define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2230 #define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2231 #define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2232 
2233 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2234 
2235 	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
2236 	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
2237 #if BITS_PER_LONG > 32
2238 	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
2239 	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
2240 #endif
2241 };
2242 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
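/*
 * Illustration of the "compression": cpumask_of(cpu) hands out a pointer
 * into this table instead of keeping NR_CPUS full-size masks around.  A
 * simplified sketch of the lookup done by get_cpu_mask() in
 * <linux/cpumask.h>:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Only the first word of each non-empty row is set and row 0 is all zeroes,
 * so the backed-up window of BITS_TO_LONGS(NR_CPUS) words contains exactly
 * the single bit for @cpu.
 */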
2243 
2244 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2245 EXPORT_SYMBOL(cpu_all_bits);
2246 
2247 #ifdef CONFIG_INIT_ALL_POSSIBLE
2248 struct cpumask __cpu_possible_mask __read_mostly
2249 	= {CPU_BITS_ALL};
2250 #else
2251 struct cpumask __cpu_possible_mask __read_mostly;
2252 #endif
2253 EXPORT_SYMBOL(__cpu_possible_mask);
2254 
2255 struct cpumask __cpu_online_mask __read_mostly;
2256 EXPORT_SYMBOL(__cpu_online_mask);
2257 
2258 struct cpumask __cpu_present_mask __read_mostly;
2259 EXPORT_SYMBOL(__cpu_present_mask);
2260 
2261 struct cpumask __cpu_active_mask __read_mostly;
2262 EXPORT_SYMBOL(__cpu_active_mask);
2263 
2264 void init_cpu_present(const struct cpumask *src)
2265 {
2266 	cpumask_copy(&__cpu_present_mask, src);
2267 }
2268 
2269 void init_cpu_possible(const struct cpumask *src)
2270 {
2271 	cpumask_copy(&__cpu_possible_mask, src);
2272 }
2273 
2274 void init_cpu_online(const struct cpumask *src)
2275 {
2276 	cpumask_copy(&__cpu_online_mask, src);
2277 }
2278 
2279 /*
2280  * Activate the first processor.
2281  */
2282 void __init boot_cpu_init(void)
2283 {
2284 	int cpu = smp_processor_id();
2285 
2286 	/* Mark the boot CPU "present", "online" etc. for the SMP and UP case */
2287 	set_cpu_online(cpu, true);
2288 	set_cpu_active(cpu, true);
2289 	set_cpu_present(cpu, true);
2290 	set_cpu_possible(cpu, true);
2291 
2292 #ifdef CONFIG_SMP
2293 	__boot_cpu_id = cpu;
2294 #endif
2295 }
2296 
2297 /*
2298  * Must be called _AFTER_ setting up the per_cpu areas
2299  */
2300 void __init boot_cpu_hotplug_init(void)
2301 {
2302 #ifdef CONFIG_SMP
2303 	this_cpu_write(cpuhp_state.booted_once, true);
2304 #endif
2305 	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2306 }
2307
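/*
 * Illustration: both boot helpers above are called from start_kernel():
 * boot_cpu_init() very early to seed the cpu masks for the boot CPU, and
 * boot_cpu_hotplug_init() only after the per_cpu areas exist, since it
 * updates the boot CPU's cpuhp_state via this_cpu_write().
 */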