11da177e4SLinus Torvalds /* CPU control.
21da177e4SLinus Torvalds * (C) 2001, 2002, 2003, 2004 Rusty Russell
31da177e4SLinus Torvalds *
41da177e4SLinus Torvalds * This code is licensed under the GPL.
51da177e4SLinus Torvalds */
6bf2c59fcSPeter Zijlstra #include <linux/sched/mm.h>
71da177e4SLinus Torvalds #include <linux/proc_fs.h>
81da177e4SLinus Torvalds #include <linux/smp.h>
91da177e4SLinus Torvalds #include <linux/init.h>
101da177e4SLinus Torvalds #include <linux/notifier.h>
113f07c014SIngo Molnar #include <linux/sched/signal.h>
12ef8bd77fSIngo Molnar #include <linux/sched/hotplug.h>
139ca12ac0SNicholas Piggin #include <linux/sched/isolation.h>
1429930025SIngo Molnar #include <linux/sched/task.h>
15a74cfffbSThomas Gleixner #include <linux/sched/smt.h>
161da177e4SLinus Torvalds #include <linux/unistd.h>
171da177e4SLinus Torvalds #include <linux/cpu.h>
18cb79295eSAnton Vorontsov #include <linux/oom.h>
19cb79295eSAnton Vorontsov #include <linux/rcupdate.h>
206f062123SThomas Gleixner #include <linux/delay.h>
219984de1aSPaul Gortmaker #include <linux/export.h>
22e4cc2f87SAnton Vorontsov #include <linux/bug.h>
231da177e4SLinus Torvalds #include <linux/kthread.h>
241da177e4SLinus Torvalds #include <linux/stop_machine.h>
2581615b62SIngo Molnar #include <linux/mutex.h>
265a0e3ad6STejun Heo #include <linux/gfp.h>
2779cfbdfaSSrivatsa S. Bhat #include <linux/suspend.h>
28a19423b9SGautham R. Shenoy #include <linux/lockdep.h>
29345527b1SPreeti U Murthy #include <linux/tick.h>
30a8994181SThomas Gleixner #include <linux/irq.h>
31941154bdSThomas Gleixner #include <linux/nmi.h>
324cb28cedSThomas Gleixner #include <linux/smpboot.h>
33e6d4989aSRichard Weinberger #include <linux/relay.h>
346731d4f1SSebastian Andrzej Siewior #include <linux/slab.h>
35dce1ca05SMark Rutland #include <linux/scs.h>
36fc8dffd3SThomas Gleixner #include <linux/percpu-rwsem.h>
37b22afcdfSThomas Gleixner #include <linux/cpuset.h>
383191dd5aSJason A. Donenfeld #include <linux/random.h>
39bae1a962SKuppuswamy Sathyanarayanan #include <linux/cc_platform.h>
40cff7d378SThomas Gleixner
41bb3632c6STodd E Brandt #include <trace/events/power.h>
42cff7d378SThomas Gleixner #define CREATE_TRACE_POINTS
43cff7d378SThomas Gleixner #include <trace/events/cpuhp.h>
441da177e4SLinus Torvalds
4538498a67SThomas Gleixner #include "smpboot.h"
4638498a67SThomas Gleixner
47cff7d378SThomas Gleixner /**
4811bc021dSRandy Dunlap * struct cpuhp_cpu_state - Per cpu hotplug state storage
49cff7d378SThomas Gleixner * @state: The current cpu state
50cff7d378SThomas Gleixner * @target: The target state
5111bc021dSRandy Dunlap * @fail: Current CPU hotplug callback state
524cb28cedSThomas Gleixner * @thread: Pointer to the hotplug thread
534cb28cedSThomas Gleixner * @should_run: Thread should execute
543b9d6da6SSebastian Andrzej Siewior * @rollback: Perform a rollback
55a724632cSThomas Gleixner * @single: Single callback invocation
56a724632cSThomas Gleixner * @bringup: Single callback bringup or teardown selector
5711bc021dSRandy Dunlap * @cpu: CPU number
5811bc021dSRandy Dunlap * @node: Remote CPU node; for multi-instance, do a
5911bc021dSRandy Dunlap * single entry callback for install/remove
6011bc021dSRandy Dunlap * @last: For multi-instance rollback, remember how far we got
61a724632cSThomas Gleixner * @cb_state: The state for a single callback (install/uninstall)
624cb28cedSThomas Gleixner * @result: Result of the operation
636f062123SThomas Gleixner * @ap_sync_state: State for AP synchronization
645ebe7742SPeter Zijlstra * @done_up: Signal completion to the issuer of the task for cpu-up
655ebe7742SPeter Zijlstra * @done_down: Signal completion to the issuer of the task for cpu-down
66cff7d378SThomas Gleixner */
67cff7d378SThomas Gleixner struct cpuhp_cpu_state {
68cff7d378SThomas Gleixner enum cpuhp_state state;
69cff7d378SThomas Gleixner enum cpuhp_state target;
701db49484SPeter Zijlstra enum cpuhp_state fail;
714cb28cedSThomas Gleixner #ifdef CONFIG_SMP
724cb28cedSThomas Gleixner struct task_struct *thread;
734cb28cedSThomas Gleixner bool should_run;
743b9d6da6SSebastian Andrzej Siewior bool rollback;
75a724632cSThomas Gleixner bool single;
76a724632cSThomas Gleixner bool bringup;
77cf392d10SThomas Gleixner struct hlist_node *node;
784dddfb5fSPeter Zijlstra struct hlist_node *last;
794cb28cedSThomas Gleixner enum cpuhp_state cb_state;
804cb28cedSThomas Gleixner int result;
816f062123SThomas Gleixner atomic_t ap_sync_state;
825ebe7742SPeter Zijlstra struct completion done_up;
835ebe7742SPeter Zijlstra struct completion done_down;
844cb28cedSThomas Gleixner #endif
85cff7d378SThomas Gleixner };
86cff7d378SThomas Gleixner
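/*
 * Per CPU hotplug state storage. @fail starts out as CPUHP_INVALID, so no
 * callback failure injection is armed by default.
 */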
871db49484SPeter Zijlstra static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
881db49484SPeter Zijlstra .fail = CPUHP_INVALID,
891db49484SPeter Zijlstra };
90cff7d378SThomas Gleixner
91e797bda3SThomas Gleixner #ifdef CONFIG_SMP
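/* Mask of CPUs which have completed bringup at least once since boot */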
92e797bda3SThomas Gleixner cpumask_t cpus_booted_once_mask;
93e797bda3SThomas Gleixner #endif
94e797bda3SThomas Gleixner
9549dfe2a6SThomas Gleixner #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
965f4b55e1SPeter Zijlstra static struct lockdep_map cpuhp_state_up_map =
975f4b55e1SPeter Zijlstra STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
985f4b55e1SPeter Zijlstra static struct lockdep_map cpuhp_state_down_map =
995f4b55e1SPeter Zijlstra STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
1005f4b55e1SPeter Zijlstra
1015f4b55e1SPeter Zijlstra
10276dc6c09SMathieu Malaterre static inline void cpuhp_lock_acquire(bool bringup)
1035f4b55e1SPeter Zijlstra {
1045f4b55e1SPeter Zijlstra lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
1055f4b55e1SPeter Zijlstra }
1065f4b55e1SPeter Zijlstra
10776dc6c09SMathieu Malaterre static inline void cpuhp_lock_release(bool bringup)
1085f4b55e1SPeter Zijlstra {
1095f4b55e1SPeter Zijlstra lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
1105f4b55e1SPeter Zijlstra }
1115f4b55e1SPeter Zijlstra #else
1125f4b55e1SPeter Zijlstra
11376dc6c09SMathieu Malaterre static inline void cpuhp_lock_acquire(bool bringup) { }
11476dc6c09SMathieu Malaterre static inline void cpuhp_lock_release(bool bringup) { }
1155f4b55e1SPeter Zijlstra
11649dfe2a6SThomas Gleixner #endif
11749dfe2a6SThomas Gleixner
118cff7d378SThomas Gleixner /**
11911bc021dSRandy Dunlap * struct cpuhp_step - Hotplug state machine step
120cff7d378SThomas Gleixner * @name: Name of the step
121cff7d378SThomas Gleixner * @startup: Startup function of the step
122cff7d378SThomas Gleixner * @teardown: Teardown function of the step
123757c989bSThomas Gleixner * @cant_stop: Bringup/teardown can't be stopped at this step
12411bc021dSRandy Dunlap * @multi_instance: State has multiple instances which get added afterwards
125cff7d378SThomas Gleixner */
126cff7d378SThomas Gleixner struct cpuhp_step {
127cff7d378SThomas Gleixner const char *name;
128cf392d10SThomas Gleixner union {
1293c1627e9SThomas Gleixner int (*single)(unsigned int cpu);
1303c1627e9SThomas Gleixner int (*multi)(unsigned int cpu,
131cf392d10SThomas Gleixner struct hlist_node *node);
1323c1627e9SThomas Gleixner } startup;
133cf392d10SThomas Gleixner union {
1343c1627e9SThomas Gleixner int (*single)(unsigned int cpu);
1353c1627e9SThomas Gleixner int (*multi)(unsigned int cpu,
136cf392d10SThomas Gleixner struct hlist_node *node);
1373c1627e9SThomas Gleixner } teardown;
13811bc021dSRandy Dunlap /* private: */
139cf392d10SThomas Gleixner struct hlist_head list;
14011bc021dSRandy Dunlap /* public: */
141757c989bSThomas Gleixner bool cant_stop;
142cf392d10SThomas Gleixner bool multi_instance;
143cff7d378SThomas Gleixner };
144cff7d378SThomas Gleixner
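/* Serializes modifications of the hotplug state machine (cpuhp_hp_states[]) */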
14598f8cdceSThomas Gleixner static DEFINE_MUTEX(cpuhp_state_mutex);
14617a2f1ceSLai Jiangshan static struct cpuhp_step cpuhp_hp_states[];
147cff7d378SThomas Gleixner
148a724632cSThomas Gleixner static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
149a724632cSThomas Gleixner {
15017a2f1ceSLai Jiangshan return cpuhp_hp_states + state;
151a724632cSThomas Gleixner }
152a724632cSThomas Gleixner
153453e4108SVincent Donnefort static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
154453e4108SVincent Donnefort {
155453e4108SVincent Donnefort return bringup ? !step->startup.single : !step->teardown.single;
156453e4108SVincent Donnefort }
157453e4108SVincent Donnefort
158cff7d378SThomas Gleixner /**
15911bc021dSRandy Dunlap * cpuhp_invoke_callback - Invoke the callbacks for a given state
160cff7d378SThomas Gleixner * @cpu: The cpu for which the callback should be invoked
16196abb968SPeter Zijlstra * @state: The state to do callbacks for
162a724632cSThomas Gleixner * @bringup: True if the bringup callback should be invoked
16396abb968SPeter Zijlstra * @node: For multi-instance, do a single entry callback for install/remove
16496abb968SPeter Zijlstra * @lastp: For multi-instance rollback, remember how far we got
165cff7d378SThomas Gleixner *
166cf392d10SThomas Gleixner * Called from cpu hotplug and from the state register machinery.
16711bc021dSRandy Dunlap *
16811bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
169cff7d378SThomas Gleixner */
170a724632cSThomas Gleixner static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
17196abb968SPeter Zijlstra bool bringup, struct hlist_node *node,
17296abb968SPeter Zijlstra struct hlist_node **lastp)
173cff7d378SThomas Gleixner {
174cff7d378SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
175a724632cSThomas Gleixner struct cpuhp_step *step = cpuhp_get_step(state);
176cf392d10SThomas Gleixner int (*cbm)(unsigned int cpu, struct hlist_node *node);
177cf392d10SThomas Gleixner int (*cb)(unsigned int cpu);
178cf392d10SThomas Gleixner int ret, cnt;
179cff7d378SThomas Gleixner
1801db49484SPeter Zijlstra if (st->fail == state) {
1811db49484SPeter Zijlstra st->fail = CPUHP_INVALID;
1821db49484SPeter Zijlstra return -EAGAIN;
1831db49484SPeter Zijlstra }
1841db49484SPeter Zijlstra
185453e4108SVincent Donnefort if (cpuhp_step_empty(bringup, step)) {
186453e4108SVincent Donnefort WARN_ON_ONCE(1);
187453e4108SVincent Donnefort return 0;
188453e4108SVincent Donnefort }
189453e4108SVincent Donnefort
190cf392d10SThomas Gleixner if (!step->multi_instance) {
19196abb968SPeter Zijlstra WARN_ON_ONCE(lastp && *lastp);
1923c1627e9SThomas Gleixner cb = bringup ? step->startup.single : step->teardown.single;
193453e4108SVincent Donnefort
194a724632cSThomas Gleixner trace_cpuhp_enter(cpu, st->target, state, cb);
195cff7d378SThomas Gleixner ret = cb(cpu);
196a724632cSThomas Gleixner trace_cpuhp_exit(cpu, st->state, state, ret);
197cf392d10SThomas Gleixner return ret;
198cf392d10SThomas Gleixner }
1993c1627e9SThomas Gleixner cbm = bringup ? step->startup.multi : step->teardown.multi;
200cf392d10SThomas Gleixner
201cf392d10SThomas Gleixner /* Single invocation for instance add/remove */
202cf392d10SThomas Gleixner if (node) {
20396abb968SPeter Zijlstra WARN_ON_ONCE(lastp && *lastp);
204cf392d10SThomas Gleixner trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
205cf392d10SThomas Gleixner ret = cbm(cpu, node);
206cf392d10SThomas Gleixner trace_cpuhp_exit(cpu, st->state, state, ret);
207cf392d10SThomas Gleixner return ret;
208cf392d10SThomas Gleixner }
209cf392d10SThomas Gleixner
210cf392d10SThomas Gleixner /* State transition. Invoke on all instances */
211cf392d10SThomas Gleixner cnt = 0;
212cf392d10SThomas Gleixner hlist_for_each(node, &step->list) {
21396abb968SPeter Zijlstra if (lastp && node == *lastp)
21496abb968SPeter Zijlstra break;
21596abb968SPeter Zijlstra
216cf392d10SThomas Gleixner trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
217cf392d10SThomas Gleixner ret = cbm(cpu, node);
218cf392d10SThomas Gleixner trace_cpuhp_exit(cpu, st->state, state, ret);
21996abb968SPeter Zijlstra if (ret) {
22096abb968SPeter Zijlstra if (!lastp)
221cf392d10SThomas Gleixner goto err;
22296abb968SPeter Zijlstra
22396abb968SPeter Zijlstra *lastp = node;
22496abb968SPeter Zijlstra return ret;
22596abb968SPeter Zijlstra }
226cf392d10SThomas Gleixner cnt++;
227cf392d10SThomas Gleixner }
22896abb968SPeter Zijlstra if (lastp)
22996abb968SPeter Zijlstra *lastp = NULL;
230cf392d10SThomas Gleixner return 0;
231cf392d10SThomas Gleixner err:
232cf392d10SThomas Gleixner /* Rollback the instances if one failed */
2333c1627e9SThomas Gleixner cbm = !bringup ? step->startup.multi : step->teardown.multi;
234cf392d10SThomas Gleixner if (!cbm)
235cf392d10SThomas Gleixner return ret;
236cf392d10SThomas Gleixner
237cf392d10SThomas Gleixner hlist_for_each(node, &step->list) {
238cf392d10SThomas Gleixner if (!cnt--)
239cf392d10SThomas Gleixner break;
240724a8688SPeter Zijlstra
241724a8688SPeter Zijlstra trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
242724a8688SPeter Zijlstra ret = cbm(cpu, node);
243724a8688SPeter Zijlstra trace_cpuhp_exit(cpu, st->state, state, ret);
244724a8688SPeter Zijlstra /*
245724a8688SPeter Zijlstra * Rollback must not fail.
246724a8688SPeter Zijlstra */
247724a8688SPeter Zijlstra WARN_ON_ONCE(ret);
248cff7d378SThomas Gleixner }
249cff7d378SThomas Gleixner return ret;
250cff7d378SThomas Gleixner }
251cff7d378SThomas Gleixner
25298a79d6aSRusty Russell #ifdef CONFIG_SMP
253fcb3029aSArnd Bergmann static bool cpuhp_is_ap_state(enum cpuhp_state state)
254fcb3029aSArnd Bergmann {
255fcb3029aSArnd Bergmann /*
256fcb3029aSArnd Bergmann * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
257fcb3029aSArnd Bergmann * purposes as that state is handled explicitly in cpu_down.
258fcb3029aSArnd Bergmann */
259fcb3029aSArnd Bergmann return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
260fcb3029aSArnd Bergmann }
261fcb3029aSArnd Bergmann
2625ebe7742SPeter Zijlstra static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
2635ebe7742SPeter Zijlstra {
2645ebe7742SPeter Zijlstra struct completion *done = bringup ? &st->done_up : &st->done_down;
2655ebe7742SPeter Zijlstra wait_for_completion(done);
2665ebe7742SPeter Zijlstra }
2675ebe7742SPeter Zijlstra
2685ebe7742SPeter Zijlstra static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
2695ebe7742SPeter Zijlstra {
2705ebe7742SPeter Zijlstra struct completion *done = bringup ? &st->done_up : &st->done_down;
2715ebe7742SPeter Zijlstra complete(done);
2725ebe7742SPeter Zijlstra }
2735ebe7742SPeter Zijlstra
2745ebe7742SPeter Zijlstra /*
2755ebe7742SPeter Zijlstra * The former STARTING/DYING states run with IRQs disabled and must not fail.
2765ebe7742SPeter Zijlstra */
2775ebe7742SPeter Zijlstra static bool cpuhp_is_atomic_state(enum cpuhp_state state)
2785ebe7742SPeter Zijlstra {
2795ebe7742SPeter Zijlstra return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
2805ebe7742SPeter Zijlstra }
2815ebe7742SPeter Zijlstra
2826f062123SThomas Gleixner /* Synchronization state management */
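/*
 * Rough lifecycle as used by the helpers below: the control CPU marks an AP
 * as KICKED before booting it, the AP reports ALIVE, and the control CPU
 * releases it with SHOULD_ONLINE. On teardown the control CPU requests
 * SHOULD_DIE and the AP reports DEAD.
 */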
2836f062123SThomas Gleixner enum cpuhp_sync_state {
2846f062123SThomas Gleixner SYNC_STATE_DEAD,
2856f062123SThomas Gleixner SYNC_STATE_KICKED,
2866f062123SThomas Gleixner SYNC_STATE_SHOULD_DIE,
2876f062123SThomas Gleixner SYNC_STATE_ALIVE,
2886f062123SThomas Gleixner SYNC_STATE_SHOULD_ONLINE,
2896f062123SThomas Gleixner SYNC_STATE_ONLINE,
2906f062123SThomas Gleixner };
2916f062123SThomas Gleixner
2926f062123SThomas Gleixner #ifdef CONFIG_HOTPLUG_CORE_SYNC
2936f062123SThomas Gleixner /**
2946f062123SThomas Gleixner * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
2956f062123SThomas Gleixner * @state: The synchronization state to set
2966f062123SThomas Gleixner *
2976f062123SThomas Gleixner * No synchronization point. Just update of the synchronization state, but implies
2986f062123SThomas Gleixner * a full barrier so that the AP changes are visible before the control CPU proceeds.
2996f062123SThomas Gleixner */
3006f062123SThomas Gleixner static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
3016f062123SThomas Gleixner {
3026f062123SThomas Gleixner atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
3036f062123SThomas Gleixner
3046f062123SThomas Gleixner (void)atomic_xchg(st, state);
3056f062123SThomas Gleixner }
3066f062123SThomas Gleixner
3076f062123SThomas Gleixner void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
3086f062123SThomas Gleixner
3096f062123SThomas Gleixner static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
3106f062123SThomas Gleixner enum cpuhp_sync_state next_state)
3116f062123SThomas Gleixner {
3126f062123SThomas Gleixner atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
3136f062123SThomas Gleixner ktime_t now, end, start = ktime_get();
3146f062123SThomas Gleixner int sync;
3156f062123SThomas Gleixner
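/* Poll/sleep for at most 10 seconds before giving up */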
3166f062123SThomas Gleixner end = start + 10ULL * NSEC_PER_SEC;
3176f062123SThomas Gleixner
3186f062123SThomas Gleixner sync = atomic_read(st);
3196f062123SThomas Gleixner while (1) {
3206f062123SThomas Gleixner if (sync == state) {
3216f062123SThomas Gleixner if (!atomic_try_cmpxchg(st, &sync, next_state))
3226f062123SThomas Gleixner continue;
3236f062123SThomas Gleixner return true;
3246f062123SThomas Gleixner }
3256f062123SThomas Gleixner
3266f062123SThomas Gleixner now = ktime_get();
3276f062123SThomas Gleixner if (now > end) {
3286f062123SThomas Gleixner /* Timeout. Leave the state unchanged */
3296f062123SThomas Gleixner return false;
3306f062123SThomas Gleixner } else if (now - start < NSEC_PER_MSEC) {
3316f062123SThomas Gleixner /* Poll for one millisecond */
3326f062123SThomas Gleixner arch_cpuhp_sync_state_poll();
3336f062123SThomas Gleixner } else {
3346f062123SThomas Gleixner usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
3356f062123SThomas Gleixner }
3366f062123SThomas Gleixner sync = atomic_read(st);
3376f062123SThomas Gleixner }
3386f062123SThomas Gleixner return true;
3396f062123SThomas Gleixner }
3406f062123SThomas Gleixner #else /* CONFIG_HOTPLUG_CORE_SYNC */
3416f062123SThomas Gleixner static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
3426f062123SThomas Gleixner #endif /* !CONFIG_HOTPLUG_CORE_SYNC */
3436f062123SThomas Gleixner
3446f062123SThomas Gleixner #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
3456f062123SThomas Gleixner /**
3466f062123SThomas Gleixner * cpuhp_ap_report_dead - Update synchronization state to DEAD
3476f062123SThomas Gleixner *
3486f062123SThomas Gleixner * No synchronization point. Just update of the synchronization state.
3496f062123SThomas Gleixner */
3506f062123SThomas Gleixner void cpuhp_ap_report_dead(void)
3516f062123SThomas Gleixner {
3526f062123SThomas Gleixner cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
3536f062123SThomas Gleixner }
3546f062123SThomas Gleixner
3556f062123SThomas Gleixner void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
3566f062123SThomas Gleixner
3576f062123SThomas Gleixner /*
3586f062123SThomas Gleixner * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
3596f062123SThomas Gleixner * because the AP cannot issue complete() at this stage.
3606f062123SThomas Gleixner */
3616f062123SThomas Gleixner static void cpuhp_bp_sync_dead(unsigned int cpu)
3626f062123SThomas Gleixner {
3636f062123SThomas Gleixner atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
3646f062123SThomas Gleixner int sync = atomic_read(st);
3656f062123SThomas Gleixner
3666f062123SThomas Gleixner do {
3676f062123SThomas Gleixner /* CPU can have reported dead already. Don't overwrite that! */
3686f062123SThomas Gleixner if (sync == SYNC_STATE_DEAD)
3696f062123SThomas Gleixner break;
3706f062123SThomas Gleixner } while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
3716f062123SThomas Gleixner
3726f062123SThomas Gleixner if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
3736f062123SThomas Gleixner /* CPU reached dead state. Invoke the cleanup function */
3746f062123SThomas Gleixner arch_cpuhp_cleanup_dead_cpu(cpu);
3756f062123SThomas Gleixner return;
3766f062123SThomas Gleixner }
3776f062123SThomas Gleixner
3786f062123SThomas Gleixner /* No further action possible. Emit message and give up. */
3796f062123SThomas Gleixner pr_err("CPU%u failed to report dead state\n", cpu);
3806f062123SThomas Gleixner }
3816f062123SThomas Gleixner #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
3826f062123SThomas Gleixner static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
3836f062123SThomas Gleixner #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
3846f062123SThomas Gleixner
3856f062123SThomas Gleixner #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
3866f062123SThomas Gleixner /**
3876f062123SThomas Gleixner * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
3886f062123SThomas Gleixner *
3896f062123SThomas Gleixner * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
3906f062123SThomas Gleixner * for the BP to release it.
3916f062123SThomas Gleixner */
3926f062123SThomas Gleixner void cpuhp_ap_sync_alive(void)
3936f062123SThomas Gleixner {
3946f062123SThomas Gleixner atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
3956f062123SThomas Gleixner
3966f062123SThomas Gleixner cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
3976f062123SThomas Gleixner
3986f062123SThomas Gleixner /* Wait for the control CPU to release it. */
3996f062123SThomas Gleixner while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
4006f062123SThomas Gleixner cpu_relax();
4016f062123SThomas Gleixner }
4026f062123SThomas Gleixner
4036f062123SThomas Gleixner static bool cpuhp_can_boot_ap(unsigned int cpu)
4046f062123SThomas Gleixner {
4056f062123SThomas Gleixner atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
4066f062123SThomas Gleixner int sync = atomic_read(st);
4076f062123SThomas Gleixner
4086f062123SThomas Gleixner again:
4096f062123SThomas Gleixner switch (sync) {
4106f062123SThomas Gleixner case SYNC_STATE_DEAD:
4116f062123SThomas Gleixner /* CPU is properly dead */
4126f062123SThomas Gleixner break;
4136f062123SThomas Gleixner case SYNC_STATE_KICKED:
4146f062123SThomas Gleixner /* CPU did not come up in previous attempt */
4156f062123SThomas Gleixner break;
4166f062123SThomas Gleixner case SYNC_STATE_ALIVE:
4176f062123SThomas Gleixner /* CPU is stuck in cpuhp_ap_sync_alive(). */
4186f062123SThomas Gleixner break;
4196f062123SThomas Gleixner default:
4206f062123SThomas Gleixner /* CPU failed to report online or dead and is in limbo state. */
4216f062123SThomas Gleixner return false;
4226f062123SThomas Gleixner }
4236f062123SThomas Gleixner
4246f062123SThomas Gleixner /* Prepare for booting */
4256f062123SThomas Gleixner if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
4266f062123SThomas Gleixner goto again;
4276f062123SThomas Gleixner
4286f062123SThomas Gleixner return true;
4296f062123SThomas Gleixner }
4306f062123SThomas Gleixner
4316f062123SThomas Gleixner void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
4326f062123SThomas Gleixner
4336f062123SThomas Gleixner /*
4346f062123SThomas Gleixner * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
4356f062123SThomas Gleixner * because the AP cannot issue complete() so early in the bringup.
4366f062123SThomas Gleixner */
4376f062123SThomas Gleixner static int cpuhp_bp_sync_alive(unsigned int cpu)
4386f062123SThomas Gleixner {
4396f062123SThomas Gleixner int ret = 0;
4406f062123SThomas Gleixner
4416f062123SThomas Gleixner if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
4426f062123SThomas Gleixner return 0;
4436f062123SThomas Gleixner
4446f062123SThomas Gleixner if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
4456f062123SThomas Gleixner pr_err("CPU%u failed to report alive state\n", cpu);
4466f062123SThomas Gleixner ret = -EIO;
4476f062123SThomas Gleixner }
4486f062123SThomas Gleixner
4496f062123SThomas Gleixner /* Let the architecture cleanup the kick alive mechanics. */
4506f062123SThomas Gleixner arch_cpuhp_cleanup_kick_cpu(cpu);
4516f062123SThomas Gleixner return ret;
4526f062123SThomas Gleixner }
4536f062123SThomas Gleixner #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
4546f062123SThomas Gleixner static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
4556f062123SThomas Gleixner static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
4566f062123SThomas Gleixner #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
4576f062123SThomas Gleixner
458b3199c02SRusty Russell /* Serializes the updates to cpu_online_mask, cpu_present_mask */
459aa953877SLinus Torvalds static DEFINE_MUTEX(cpu_add_remove_lock);
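/* True while hotplug callbacks run in the suspend/resume path with tasks frozen */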
460090e77c3SThomas Gleixner bool cpuhp_tasks_frozen;
461090e77c3SThomas Gleixner EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
4621da177e4SLinus Torvalds
46379a6cdebSLai Jiangshan /*
46493ae4f97SSrivatsa S. Bhat * The following two APIs (cpu_maps_update_begin/done) must be used when
46593ae4f97SSrivatsa S. Bhat * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
46679a6cdebSLai Jiangshan */
46779a6cdebSLai Jiangshan void cpu_maps_update_begin(void)
46879a6cdebSLai Jiangshan {
46979a6cdebSLai Jiangshan mutex_lock(&cpu_add_remove_lock);
47079a6cdebSLai Jiangshan }
47179a6cdebSLai Jiangshan
47279a6cdebSLai Jiangshan void cpu_maps_update_done(void)
47379a6cdebSLai Jiangshan {
47479a6cdebSLai Jiangshan mutex_unlock(&cpu_add_remove_lock);
47579a6cdebSLai Jiangshan }
4761da177e4SLinus Torvalds
477fc8dffd3SThomas Gleixner /*
478fc8dffd3SThomas Gleixner * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
479e3920fb4SRafael J. Wysocki * Should always be manipulated under cpu_add_remove_lock
480e3920fb4SRafael J. Wysocki */
481e3920fb4SRafael J. Wysocki static int cpu_hotplug_disabled;
482e3920fb4SRafael J. Wysocki
48379a6cdebSLai Jiangshan #ifdef CONFIG_HOTPLUG_CPU
48479a6cdebSLai Jiangshan
485fc8dffd3SThomas Gleixner DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
486a19423b9SGautham R. Shenoy
4878f553c49SThomas Gleixner void cpus_read_lock(void)
488a9d9baa1SAshok Raj {
489fc8dffd3SThomas Gleixner percpu_down_read(&cpu_hotplug_lock);
490a9d9baa1SAshok Raj }
4918f553c49SThomas Gleixner EXPORT_SYMBOL_GPL(cpus_read_lock);
492a9d9baa1SAshok Raj
4936f4ceee9SWaiman Long int cpus_read_trylock(void)
4946f4ceee9SWaiman Long {
4956f4ceee9SWaiman Long return percpu_down_read_trylock(&cpu_hotplug_lock);
4966f4ceee9SWaiman Long }
4976f4ceee9SWaiman Long EXPORT_SYMBOL_GPL(cpus_read_trylock);
4986f4ceee9SWaiman Long
4998f553c49SThomas Gleixner void cpus_read_unlock(void)
500a9d9baa1SAshok Raj {
501fc8dffd3SThomas Gleixner percpu_up_read(&cpu_hotplug_lock);
502a9d9baa1SAshok Raj }
5038f553c49SThomas Gleixner EXPORT_SYMBOL_GPL(cpus_read_unlock);
504a9d9baa1SAshok Raj
5058f553c49SThomas Gleixner void cpus_write_lock(void)
506d221938cSGautham R Shenoy {
507fc8dffd3SThomas Gleixner percpu_down_write(&cpu_hotplug_lock);
508d221938cSGautham R Shenoy }
509d221938cSGautham R Shenoy
5108f553c49SThomas Gleixner void cpus_write_unlock(void)
511d221938cSGautham R Shenoy {
512fc8dffd3SThomas Gleixner percpu_up_write(&cpu_hotplug_lock);
513fc8dffd3SThomas Gleixner }
514fc8dffd3SThomas Gleixner
515fc8dffd3SThomas Gleixner void lockdep_assert_cpus_held(void)
516fc8dffd3SThomas Gleixner {
517ce48c457SValentin Schneider /*
518ce48c457SValentin Schneider * We can't have hotplug operations before userspace starts running,
519ce48c457SValentin Schneider * and some init codepaths will knowingly not take the hotplug lock.
520ce48c457SValentin Schneider * This is all valid, so mute lockdep until it makes sense to report
521ce48c457SValentin Schneider * unheld locks.
522ce48c457SValentin Schneider */
523ce48c457SValentin Schneider if (system_state < SYSTEM_RUNNING)
524ce48c457SValentin Schneider return;
525ce48c457SValentin Schneider
526fc8dffd3SThomas Gleixner percpu_rwsem_assert_held(&cpu_hotplug_lock);
527d221938cSGautham R Shenoy }
52879a6cdebSLai Jiangshan
52943759fe5SFrederic Weisbecker #ifdef CONFIG_LOCKDEP
53043759fe5SFrederic Weisbecker int lockdep_is_cpus_held(void)
53143759fe5SFrederic Weisbecker {
53243759fe5SFrederic Weisbecker return percpu_rwsem_is_held(&cpu_hotplug_lock);
53343759fe5SFrederic Weisbecker }
53443759fe5SFrederic Weisbecker #endif
53543759fe5SFrederic Weisbecker
536cb92173dSPeter Zijlstra static void lockdep_acquire_cpus_lock(void)
537cb92173dSPeter Zijlstra {
5381751060eSPeter Zijlstra rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
539cb92173dSPeter Zijlstra }
540cb92173dSPeter Zijlstra
541cb92173dSPeter Zijlstra static void lockdep_release_cpus_lock(void)
542cb92173dSPeter Zijlstra {
5431751060eSPeter Zijlstra rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
544cb92173dSPeter Zijlstra }
545cb92173dSPeter Zijlstra
54616e53dbfSSrivatsa S. Bhat /*
54716e53dbfSSrivatsa S. Bhat * Wait for currently running CPU hotplug operations to complete (if any) and
54816e53dbfSSrivatsa S. Bhat * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
54916e53dbfSSrivatsa S. Bhat * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
55016e53dbfSSrivatsa S. Bhat * hotplug path before performing hotplug operations. So acquiring that lock
55116e53dbfSSrivatsa S. Bhat * guarantees mutual exclusion from any currently running hotplug operations.
55216e53dbfSSrivatsa S. Bhat */
55316e53dbfSSrivatsa S. Bhat void cpu_hotplug_disable(void)
55416e53dbfSSrivatsa S. Bhat {
55516e53dbfSSrivatsa S. Bhat cpu_maps_update_begin();
55689af7ba5SVitaly Kuznetsov cpu_hotplug_disabled++;
55716e53dbfSSrivatsa S. Bhat cpu_maps_update_done();
55816e53dbfSSrivatsa S. Bhat }
55932145c46SVitaly Kuznetsov EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
56016e53dbfSSrivatsa S. Bhat
56101b41159SLianwei Wang static void __cpu_hotplug_enable(void)
56201b41159SLianwei Wang {
56301b41159SLianwei Wang if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
56401b41159SLianwei Wang return;
56501b41159SLianwei Wang cpu_hotplug_disabled--;
56601b41159SLianwei Wang }
56701b41159SLianwei Wang
56816e53dbfSSrivatsa S. Bhat void cpu_hotplug_enable(void)
56916e53dbfSSrivatsa S. Bhat {
57016e53dbfSSrivatsa S. Bhat cpu_maps_update_begin();
57101b41159SLianwei Wang __cpu_hotplug_enable();
57216e53dbfSSrivatsa S. Bhat cpu_maps_update_done();
57316e53dbfSSrivatsa S. Bhat }
57432145c46SVitaly Kuznetsov EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
575cb92173dSPeter Zijlstra
576cb92173dSPeter Zijlstra #else
577cb92173dSPeter Zijlstra
578cb92173dSPeter Zijlstra static void lockdep_acquire_cpus_lock(void)
579cb92173dSPeter Zijlstra {
580cb92173dSPeter Zijlstra }
581cb92173dSPeter Zijlstra
582cb92173dSPeter Zijlstra static void lockdep_release_cpus_lock(void)
583cb92173dSPeter Zijlstra {
584cb92173dSPeter Zijlstra }
585cb92173dSPeter Zijlstra
586b9d10be7SToshi Kani #endif /* CONFIG_HOTPLUG_CPU */
58779a6cdebSLai Jiangshan
588a74cfffbSThomas Gleixner /*
589a74cfffbSThomas Gleixner * Architectures that need SMT-specific errata handling during SMT hotplug
590a74cfffbSThomas Gleixner * should override this.
591a74cfffbSThomas Gleixner */
592a74cfffbSThomas Gleixner void __weak arch_smt_update(void) { }
593a74cfffbSThomas Gleixner
5940cc3cd21SThomas Gleixner #ifdef CONFIG_HOTPLUG_SMT
5953f916919SMichael Ellerman
5960cc3cd21SThomas Gleixner enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
597447ae4acSMichael Ellerman static unsigned int cpu_smt_max_threads __ro_after_init;
598447ae4acSMichael Ellerman unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
599bc2d8d26SThomas Gleixner
6008e1b706bSJiri Kosina void __init cpu_smt_disable(bool force)
6010cc3cd21SThomas Gleixner {
602e1572f1dSVitaly Kuznetsov if (!cpu_smt_possible())
6038e1b706bSJiri Kosina return;
6048e1b706bSJiri Kosina
6058e1b706bSJiri Kosina if (force) {
6060cc3cd21SThomas Gleixner pr_info("SMT: Force disabled\n");
6070cc3cd21SThomas Gleixner cpu_smt_control = CPU_SMT_FORCE_DISABLED;
6088e1b706bSJiri Kosina } else {
609d0e7d144SBorislav Petkov pr_info("SMT: disabled\n");
6108e1b706bSJiri Kosina cpu_smt_control = CPU_SMT_DISABLED;
6110cc3cd21SThomas Gleixner }
612447ae4acSMichael Ellerman cpu_smt_num_threads = 1;
6138e1b706bSJiri Kosina }
6148e1b706bSJiri Kosina
615fee0aedeSThomas Gleixner /*
616fee0aedeSThomas Gleixner * The decision whether SMT is supported can only be made after the full
617b284909aSJosh Poimboeuf * CPU identification. Called from architecture code.
618fee0aedeSThomas Gleixner */
619447ae4acSMichael Ellerman void __init cpu_smt_set_num_threads(unsigned int num_threads,
620447ae4acSMichael Ellerman unsigned int max_threads)
621fee0aedeSThomas Gleixner {
622447ae4acSMichael Ellerman WARN_ON(!num_threads || (num_threads > max_threads));
623447ae4acSMichael Ellerman
62491b4a7dbSLaurent Dufour if (max_threads == 1)
625fee0aedeSThomas Gleixner cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
626447ae4acSMichael Ellerman
627447ae4acSMichael Ellerman cpu_smt_max_threads = max_threads;
628447ae4acSMichael Ellerman
629447ae4acSMichael Ellerman /*
630447ae4acSMichael Ellerman * If SMT has been disabled via the kernel command line or SMT is
631447ae4acSMichael Ellerman * not supported, set cpu_smt_num_threads to 1 for consistency.
632447ae4acSMichael Ellerman * If enabled, take the architecture requested number of threads
633447ae4acSMichael Ellerman * to bring up into account.
634447ae4acSMichael Ellerman */
635447ae4acSMichael Ellerman if (cpu_smt_control != CPU_SMT_ENABLED)
636447ae4acSMichael Ellerman cpu_smt_num_threads = 1;
637447ae4acSMichael Ellerman else if (num_threads < cpu_smt_num_threads)
638447ae4acSMichael Ellerman cpu_smt_num_threads = num_threads;
639fee0aedeSThomas Gleixner }
640fee0aedeSThomas Gleixner
6418e1b706bSJiri Kosina static int __init smt_cmdline_disable(char *str)
6428e1b706bSJiri Kosina {
6438e1b706bSJiri Kosina cpu_smt_disable(str && !strcmp(str, "force"));
6440cc3cd21SThomas Gleixner return 0;
6450cc3cd21SThomas Gleixner }
6460cc3cd21SThomas Gleixner early_param("nosmt", smt_cmdline_disable);
6470cc3cd21SThomas Gleixner
64838253464SMichael Ellerman /*
64938253464SMichael Ellerman * For architectures supporting partial SMT states, check if the thread is allowed.
65038253464SMichael Ellerman * Otherwise this has already been checked through cpu_smt_max_threads when
65138253464SMichael Ellerman * setting the SMT level.
65238253464SMichael Ellerman */
65338253464SMichael Ellerman static inline bool cpu_smt_thread_allowed(unsigned int cpu)
65438253464SMichael Ellerman {
65538253464SMichael Ellerman #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
65638253464SMichael Ellerman return topology_smt_thread_allowed(cpu);
65738253464SMichael Ellerman #else
65838253464SMichael Ellerman return true;
65938253464SMichael Ellerman #endif
66038253464SMichael Ellerman }
66138253464SMichael Ellerman
66260edbe8eSThomas Gleixner static inline bool cpu_bootable(unsigned int cpu)
6630cc3cd21SThomas Gleixner {
66438253464SMichael Ellerman if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
6650cc3cd21SThomas Gleixner return true;
6660cc3cd21SThomas Gleixner
66760edbe8eSThomas Gleixner /* All CPUs are bootable if controls are not configured */
66860edbe8eSThomas Gleixner if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
66960edbe8eSThomas Gleixner return true;
67060edbe8eSThomas Gleixner
67160edbe8eSThomas Gleixner /* All CPUs are bootable if CPU is not SMT capable */
67260edbe8eSThomas Gleixner if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
67360edbe8eSThomas Gleixner return true;
67460edbe8eSThomas Gleixner
675b284909aSJosh Poimboeuf if (topology_is_primary_thread(cpu))
6760cc3cd21SThomas Gleixner return true;
6770cc3cd21SThomas Gleixner
6780cc3cd21SThomas Gleixner /*
6790cc3cd21SThomas Gleixner * On x86 it's required to boot all logical CPUs at least once so
6800cc3cd21SThomas Gleixner * that the init code can get a chance to set CR4.MCE on each
681182e073fSEthon Paul * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
6820cc3cd21SThomas Gleixner * core will shutdown the machine.
6830cc3cd21SThomas Gleixner */
684e797bda3SThomas Gleixner return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
6850cc3cd21SThomas Gleixner }
686e1572f1dSVitaly Kuznetsov
68752b38b7aSZhang Rui /* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
688e1572f1dSVitaly Kuznetsov bool cpu_smt_possible(void)
689e1572f1dSVitaly Kuznetsov {
690e1572f1dSVitaly Kuznetsov return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
691e1572f1dSVitaly Kuznetsov cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
692e1572f1dSVitaly Kuznetsov }
693e1572f1dSVitaly Kuznetsov EXPORT_SYMBOL_GPL(cpu_smt_possible);
69418415f33SThomas Gleixner
6950cc3cd21SThomas Gleixner #else
69760edbe8eSThomas Gleixner static inline bool cpu_bootable(unsigned int cpu) { return true; }
6970cc3cd21SThomas Gleixner #endif
6980cc3cd21SThomas Gleixner
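/*
 * Record the requested @target and direction, mark the CPU as dying when
 * this is a teardown, and return the previous state for possible rollback.
 */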
6994dddfb5fSPeter Zijlstra static inline enum cpuhp_state
700b7ba6d8dSSteven Price cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
7014dddfb5fSPeter Zijlstra {
7024dddfb5fSPeter Zijlstra enum cpuhp_state prev_state = st->state;
7032ea46c6fSPeter Zijlstra bool bringup = st->state < target;
7044dddfb5fSPeter Zijlstra
7054dddfb5fSPeter Zijlstra st->rollback = false;
7064dddfb5fSPeter Zijlstra st->last = NULL;
7074dddfb5fSPeter Zijlstra
7084dddfb5fSPeter Zijlstra st->target = target;
7094dddfb5fSPeter Zijlstra st->single = false;
7102ea46c6fSPeter Zijlstra st->bringup = bringup;
711b7ba6d8dSSteven Price if (cpu_dying(cpu) != !bringup)
712b7ba6d8dSSteven Price set_cpu_dying(cpu, !bringup);
7134dddfb5fSPeter Zijlstra
7144dddfb5fSPeter Zijlstra return prev_state;
7154dddfb5fSPeter Zijlstra }
7164dddfb5fSPeter Zijlstra
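/*
 * Roll the state machine back to @prev_state: flip the direction, flag the
 * rollback and step st->state off the failed state unless a partially done
 * multi-instance callback (st->last) still has to be undone first.
 */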
7174dddfb5fSPeter Zijlstra static inline void
718b7ba6d8dSSteven Price cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
719b7ba6d8dSSteven Price enum cpuhp_state prev_state)
7204dddfb5fSPeter Zijlstra {
7212ea46c6fSPeter Zijlstra bool bringup = !st->bringup;
7222ea46c6fSPeter Zijlstra
723453e4108SVincent Donnefort st->target = prev_state;
724453e4108SVincent Donnefort
725453e4108SVincent Donnefort /*
726453e4108SVincent Donnefort * Already rolling back. No need to invert the bringup value or to change
727453e4108SVincent Donnefort * the current state.
728453e4108SVincent Donnefort */
729453e4108SVincent Donnefort if (st->rollback)
730453e4108SVincent Donnefort return;
731453e4108SVincent Donnefort
7324dddfb5fSPeter Zijlstra st->rollback = true;
7334dddfb5fSPeter Zijlstra
7344dddfb5fSPeter Zijlstra /*
7354dddfb5fSPeter Zijlstra * If we have st->last we need to undo partial multi_instance of this
7364dddfb5fSPeter Zijlstra * state first. Otherwise start undo at the previous state.
7374dddfb5fSPeter Zijlstra */
7384dddfb5fSPeter Zijlstra if (!st->last) {
7394dddfb5fSPeter Zijlstra if (st->bringup)
7404dddfb5fSPeter Zijlstra st->state--;
7414dddfb5fSPeter Zijlstra else
7424dddfb5fSPeter Zijlstra st->state++;
7434dddfb5fSPeter Zijlstra }
7444dddfb5fSPeter Zijlstra
7452ea46c6fSPeter Zijlstra st->bringup = bringup;
746b7ba6d8dSSteven Price if (cpu_dying(cpu) != !bringup)
747b7ba6d8dSSteven Price set_cpu_dying(cpu, !bringup);
7484dddfb5fSPeter Zijlstra }
7494dddfb5fSPeter Zijlstra
7504dddfb5fSPeter Zijlstra /* Regular hotplug invocation of the AP hotplug thread */
7514dddfb5fSPeter Zijlstra static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
7524dddfb5fSPeter Zijlstra {
7534dddfb5fSPeter Zijlstra if (!st->single && st->state == st->target)
7544dddfb5fSPeter Zijlstra return;
7554dddfb5fSPeter Zijlstra
7564dddfb5fSPeter Zijlstra st->result = 0;
7574dddfb5fSPeter Zijlstra /*
7584dddfb5fSPeter Zijlstra * Make sure the above stores are visible before should_run becomes
7594dddfb5fSPeter Zijlstra * true. Paired with the mb() above in cpuhp_thread_fun()
7604dddfb5fSPeter Zijlstra */
7614dddfb5fSPeter Zijlstra smp_mb();
7624dddfb5fSPeter Zijlstra st->should_run = true;
7634dddfb5fSPeter Zijlstra wake_up_process(st->thread);
7645ebe7742SPeter Zijlstra wait_for_ap_thread(st, st->bringup);
7654dddfb5fSPeter Zijlstra }
7664dddfb5fSPeter Zijlstra
767b7ba6d8dSSteven Price static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
768b7ba6d8dSSteven Price enum cpuhp_state target)
7694dddfb5fSPeter Zijlstra {
7704dddfb5fSPeter Zijlstra enum cpuhp_state prev_state;
7714dddfb5fSPeter Zijlstra int ret;
7724dddfb5fSPeter Zijlstra
773b7ba6d8dSSteven Price prev_state = cpuhp_set_state(cpu, st, target);
7744dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
7754dddfb5fSPeter Zijlstra if ((ret = st->result)) {
776b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
7774dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
7784dddfb5fSPeter Zijlstra }
7794dddfb5fSPeter Zijlstra
7804dddfb5fSPeter Zijlstra return ret;
7814dddfb5fSPeter Zijlstra }
7829cd4f1a4SThomas Gleixner
78322b612e2SThomas Gleixner static int bringup_wait_for_ap_online(unsigned int cpu)
7848df3e07eSThomas Gleixner {
7858df3e07eSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
7868df3e07eSThomas Gleixner
7879cd4f1a4SThomas Gleixner /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
7885ebe7742SPeter Zijlstra wait_for_ap_thread(st, true);
789dea1d0f5SThomas Gleixner if (WARN_ON_ONCE((!cpu_online(cpu))))
790dea1d0f5SThomas Gleixner return -ECANCELED;
7919cd4f1a4SThomas Gleixner
79245178ac0SPeter Zijlstra /* Unpark the hotplug thread of the target cpu */
7939cd4f1a4SThomas Gleixner kthread_unpark(st->thread);
7949cd4f1a4SThomas Gleixner
7950cc3cd21SThomas Gleixner /*
7960cc3cd21SThomas Gleixner * SMT soft disabling on X86 requires to bring the CPU out of the
7970cc3cd21SThomas Gleixner * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
798f5602011SJiri Kosina * CPU marked itself as booted_once in notify_cpu_starting() so the
79960edbe8eSThomas Gleixner * cpu_bootable() check will now return false if this is not the
8000cc3cd21SThomas Gleixner * primary sibling.
8010cc3cd21SThomas Gleixner */
80260edbe8eSThomas Gleixner if (!cpu_bootable(cpu))
8030cc3cd21SThomas Gleixner return -ECANCELED;
8044dddfb5fSPeter Zijlstra return 0;
8058df3e07eSThomas Gleixner }
8068df3e07eSThomas Gleixner
807a631be92SThomas Gleixner #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
808a631be92SThomas Gleixner static int cpuhp_kick_ap_alive(unsigned int cpu)
809a631be92SThomas Gleixner {
810a631be92SThomas Gleixner if (!cpuhp_can_boot_ap(cpu))
811a631be92SThomas Gleixner return -EAGAIN;
812a631be92SThomas Gleixner
813a631be92SThomas Gleixner return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
814a631be92SThomas Gleixner }
815a631be92SThomas Gleixner
816a631be92SThomas Gleixner static int cpuhp_bringup_ap(unsigned int cpu)
817a631be92SThomas Gleixner {
818a631be92SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
819a631be92SThomas Gleixner int ret;
820a631be92SThomas Gleixner
821a631be92SThomas Gleixner /*
822a631be92SThomas Gleixner * Some architectures have to walk the irq descriptors to
823a631be92SThomas Gleixner * setup the vector space for the cpu which comes online.
824a631be92SThomas Gleixner * Prevent irq alloc/free across the bringup.
825a631be92SThomas Gleixner */
826a631be92SThomas Gleixner irq_lock_sparse();
827a631be92SThomas Gleixner
828a631be92SThomas Gleixner ret = cpuhp_bp_sync_alive(cpu);
829a631be92SThomas Gleixner if (ret)
830a631be92SThomas Gleixner goto out_unlock;
831a631be92SThomas Gleixner
832a631be92SThomas Gleixner ret = bringup_wait_for_ap_online(cpu);
833a631be92SThomas Gleixner if (ret)
834a631be92SThomas Gleixner goto out_unlock;
835a631be92SThomas Gleixner
836a631be92SThomas Gleixner irq_unlock_sparse();
837a631be92SThomas Gleixner
838a631be92SThomas Gleixner if (st->target <= CPUHP_AP_ONLINE_IDLE)
839a631be92SThomas Gleixner return 0;
840a631be92SThomas Gleixner
841a631be92SThomas Gleixner return cpuhp_kick_ap(cpu, st, st->target);
842a631be92SThomas Gleixner
843a631be92SThomas Gleixner out_unlock:
844a631be92SThomas Gleixner irq_unlock_sparse();
845a631be92SThomas Gleixner return ret;
846a631be92SThomas Gleixner }
847a631be92SThomas Gleixner #else
848ba997462SThomas Gleixner static int bringup_cpu(unsigned int cpu)
849ba997462SThomas Gleixner {
85022b612e2SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
851ba997462SThomas Gleixner struct task_struct *idle = idle_thread_get(cpu);
852ba997462SThomas Gleixner int ret;
853ba997462SThomas Gleixner
8546f062123SThomas Gleixner if (!cpuhp_can_boot_ap(cpu))
8556f062123SThomas Gleixner return -EAGAIN;
8566f062123SThomas Gleixner
857aa877175SBoris Ostrovsky /*
858aa877175SBoris Ostrovsky * Some architectures have to walk the irq descriptors to
859aa877175SBoris Ostrovsky * setup the vector space for the cpu which comes online.
86022b612e2SThomas Gleixner *
86122b612e2SThomas Gleixner * Prevent irq alloc/free across the bringup by acquiring the
86222b612e2SThomas Gleixner * sparse irq lock. Hold it until the upcoming CPU completes the
86322b612e2SThomas Gleixner * startup in cpuhp_online_idle(), which avoids
86422b612e2SThomas Gleixner * intermediate synchronization points in the architecture code.
865aa877175SBoris Ostrovsky */
866aa877175SBoris Ostrovsky irq_lock_sparse();
867aa877175SBoris Ostrovsky
868ba997462SThomas Gleixner ret = __cpu_up(cpu, idle);
869530e9b76SThomas Gleixner if (ret)
87022b612e2SThomas Gleixner goto out_unlock;
87122b612e2SThomas Gleixner
8726f062123SThomas Gleixner ret = cpuhp_bp_sync_alive(cpu);
8736f062123SThomas Gleixner if (ret)
8746f062123SThomas Gleixner goto out_unlock;
8756f062123SThomas Gleixner
87622b612e2SThomas Gleixner ret = bringup_wait_for_ap_online(cpu);
87722b612e2SThomas Gleixner if (ret)
87822b612e2SThomas Gleixner goto out_unlock;
87922b612e2SThomas Gleixner
88022b612e2SThomas Gleixner irq_unlock_sparse();
88122b612e2SThomas Gleixner
88222b612e2SThomas Gleixner if (st->target <= CPUHP_AP_ONLINE_IDLE)
88322b612e2SThomas Gleixner return 0;
88422b612e2SThomas Gleixner
88522b612e2SThomas Gleixner return cpuhp_kick_ap(cpu, st, st->target);
88622b612e2SThomas Gleixner
88722b612e2SThomas Gleixner out_unlock:
88822b612e2SThomas Gleixner irq_unlock_sparse();
889ba997462SThomas Gleixner return ret;
890ba997462SThomas Gleixner }
891a631be92SThomas Gleixner #endif
892ba997462SThomas Gleixner
893bf2c59fcSPeter Zijlstra static int finish_cpu(unsigned int cpu)
894bf2c59fcSPeter Zijlstra {
895bf2c59fcSPeter Zijlstra struct task_struct *idle = idle_thread_get(cpu);
896bf2c59fcSPeter Zijlstra struct mm_struct *mm = idle->active_mm;
897bf2c59fcSPeter Zijlstra
898bf2c59fcSPeter Zijlstra /*
899bf2c59fcSPeter Zijlstra * idle_task_exit() will have switched to &init_mm, now
900bf2c59fcSPeter Zijlstra * clean up any remaining active_mm state.
901bf2c59fcSPeter Zijlstra */
902bf2c59fcSPeter Zijlstra if (mm != &init_mm)
903bf2c59fcSPeter Zijlstra idle->active_mm = &init_mm;
904aa464ba9SNicholas Piggin mmdrop_lazy_tlb(mm);
905bf2c59fcSPeter Zijlstra return 0;
906bf2c59fcSPeter Zijlstra }
907bf2c59fcSPeter Zijlstra
9082e1a3483SThomas Gleixner /*
9092e1a3483SThomas Gleixner * Hotplug state machine related functions
9102e1a3483SThomas Gleixner */
9112e1a3483SThomas Gleixner
912453e4108SVincent Donnefort /*
913453e4108SVincent Donnefort * Get the next state to run. Empty ones will be skipped. Returns true if a
914453e4108SVincent Donnefort * state must be run.
915453e4108SVincent Donnefort *
916453e4108SVincent Donnefort * st->state will be modified ahead of time, to match state_to_run, as if it
917453e4108SVincent Donnefort * had already run.
918453e4108SVincent Donnefort */
919453e4108SVincent Donnefort static bool cpuhp_next_state(bool bringup,
920453e4108SVincent Donnefort enum cpuhp_state *state_to_run,
921453e4108SVincent Donnefort struct cpuhp_cpu_state *st,
922453e4108SVincent Donnefort enum cpuhp_state target)
9232e1a3483SThomas Gleixner {
924453e4108SVincent Donnefort do {
925453e4108SVincent Donnefort if (bringup) {
926453e4108SVincent Donnefort if (st->state >= target)
927453e4108SVincent Donnefort return false;
928453e4108SVincent Donnefort
929453e4108SVincent Donnefort *state_to_run = ++st->state;
930453e4108SVincent Donnefort } else {
931453e4108SVincent Donnefort if (st->state <= target)
932453e4108SVincent Donnefort return false;
933453e4108SVincent Donnefort
934453e4108SVincent Donnefort *state_to_run = st->state--;
935453e4108SVincent Donnefort }
936453e4108SVincent Donnefort
937453e4108SVincent Donnefort if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
938453e4108SVincent Donnefort break;
939453e4108SVincent Donnefort } while (true);
940453e4108SVincent Donnefort
941453e4108SVincent Donnefort return true;
942453e4108SVincent Donnefort }
943453e4108SVincent Donnefort
9446f855b39SVincent Donnefort static int __cpuhp_invoke_callback_range(bool bringup,
9456f855b39SVincent Donnefort unsigned int cpu,
9466f855b39SVincent Donnefort struct cpuhp_cpu_state *st,
9476f855b39SVincent Donnefort enum cpuhp_state target,
9486f855b39SVincent Donnefort bool nofail)
9496f855b39SVincent Donnefort {
9506f855b39SVincent Donnefort enum cpuhp_state state;
9516f855b39SVincent Donnefort int ret = 0;
9526f855b39SVincent Donnefort
9536f855b39SVincent Donnefort while (cpuhp_next_state(bringup, &state, st, target)) {
9546f855b39SVincent Donnefort int err;
9556f855b39SVincent Donnefort
9566f855b39SVincent Donnefort err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
9576f855b39SVincent Donnefort if (!err)
9586f855b39SVincent Donnefort continue;
9596f855b39SVincent Donnefort
9606f855b39SVincent Donnefort if (nofail) {
9616f855b39SVincent Donnefort pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
9626f855b39SVincent Donnefort cpu, bringup ? "UP" : "DOWN",
9636f855b39SVincent Donnefort cpuhp_get_step(st->state)->name,
9646f855b39SVincent Donnefort st->state, err);
9656f855b39SVincent Donnefort ret = -1;
9666f855b39SVincent Donnefort } else {
9676f855b39SVincent Donnefort ret = err;
9686f855b39SVincent Donnefort break;
9696f855b39SVincent Donnefort }
9706f855b39SVincent Donnefort }
9716f855b39SVincent Donnefort
9726f855b39SVincent Donnefort return ret;
9736f855b39SVincent Donnefort }
9746f855b39SVincent Donnefort
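/*
 * Convenience wrappers: the plain variant stops at and returns the first
 * error, the _nofail variant warns and keeps going instead of aborting.
 */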
9756f855b39SVincent Donnefort static inline int cpuhp_invoke_callback_range(bool bringup,
976453e4108SVincent Donnefort unsigned int cpu,
977453e4108SVincent Donnefort struct cpuhp_cpu_state *st,
978453e4108SVincent Donnefort enum cpuhp_state target)
979453e4108SVincent Donnefort {
9806f855b39SVincent Donnefort return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
981453e4108SVincent Donnefort }
982453e4108SVincent Donnefort
9836f855b39SVincent Donnefort static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
9846f855b39SVincent Donnefort unsigned int cpu,
9856f855b39SVincent Donnefort struct cpuhp_cpu_state *st,
9866f855b39SVincent Donnefort enum cpuhp_state target)
9876f855b39SVincent Donnefort {
9886f855b39SVincent Donnefort __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
9892e1a3483SThomas Gleixner }
9902e1a3483SThomas Gleixner
991206b9235SThomas Gleixner static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
992206b9235SThomas Gleixner {
993206b9235SThomas Gleixner if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
994206b9235SThomas Gleixner return true;
995206b9235SThomas Gleixner /*
996206b9235SThomas Gleixner * When CPU hotplug is disabled, then taking the CPU down is not
997206b9235SThomas Gleixner * possible because takedown_cpu() and the architecture and
998206b9235SThomas Gleixner * subsystem specific mechanisms are not available. So the CPU
999206b9235SThomas Gleixner * which would be completely unplugged again needs to stay around
1000206b9235SThomas Gleixner * in the current state.
1001206b9235SThomas Gleixner */
1002206b9235SThomas Gleixner return st->state <= CPUHP_BRINGUP_CPU;
1003206b9235SThomas Gleixner }
1004206b9235SThomas Gleixner
10052e1a3483SThomas Gleixner static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1006a724632cSThomas Gleixner enum cpuhp_state target)
10072e1a3483SThomas Gleixner {
10082e1a3483SThomas Gleixner enum cpuhp_state prev_state = st->state;
10092e1a3483SThomas Gleixner int ret = 0;
10102e1a3483SThomas Gleixner
1011453e4108SVincent Donnefort ret = cpuhp_invoke_callback_range(true, cpu, st, target);
10122e1a3483SThomas Gleixner if (ret) {
1013ebca71a8SDongli Zhang pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
1014ebca71a8SDongli Zhang ret, cpu, cpuhp_get_step(st->state)->name,
1015ebca71a8SDongli Zhang st->state);
1016ebca71a8SDongli Zhang
1017b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
1018453e4108SVincent Donnefort if (can_rollback_cpu(st))
1019453e4108SVincent Donnefort WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
1020453e4108SVincent Donnefort prev_state));
10212e1a3483SThomas Gleixner }
10222e1a3483SThomas Gleixner return ret;
10232e1a3483SThomas Gleixner }
10242e1a3483SThomas Gleixner
10254cb28cedSThomas Gleixner /*
10264cb28cedSThomas Gleixner * The cpu hotplug threads manage the bringup and teardown of the cpus
10274cb28cedSThomas Gleixner */
10284cb28cedSThomas Gleixner static int cpuhp_should_run(unsigned int cpu)
10294cb28cedSThomas Gleixner {
10304cb28cedSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
10314cb28cedSThomas Gleixner
10324cb28cedSThomas Gleixner return st->should_run;
10334cb28cedSThomas Gleixner }
10344cb28cedSThomas Gleixner
10354cb28cedSThomas Gleixner /*
10364cb28cedSThomas Gleixner * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
10374cb28cedSThomas Gleixner * callbacks when a state gets [un]installed at runtime.
10384dddfb5fSPeter Zijlstra *
10394dddfb5fSPeter Zijlstra * Each invocation of this function by the smpboot thread does a single AP
10404dddfb5fSPeter Zijlstra * state callback.
10414dddfb5fSPeter Zijlstra *
10424dddfb5fSPeter Zijlstra * It has 3 modes of operation:
10434dddfb5fSPeter Zijlstra * - single: runs st->cb_state
10444dddfb5fSPeter Zijlstra * - up: runs ++st->state, while st->state < st->target
10454dddfb5fSPeter Zijlstra * - down: runs st->state--, while st->state > st->target
10464dddfb5fSPeter Zijlstra *
10474dddfb5fSPeter Zijlstra * When complete or on error, should_run is cleared and the completion is fired.
10484cb28cedSThomas Gleixner */
10494cb28cedSThomas Gleixner static void cpuhp_thread_fun(unsigned int cpu)
10504cb28cedSThomas Gleixner {
10514cb28cedSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
10524dddfb5fSPeter Zijlstra bool bringup = st->bringup;
10534dddfb5fSPeter Zijlstra enum cpuhp_state state;
10544cb28cedSThomas Gleixner
1055f8b7530aSNeeraj Upadhyay if (WARN_ON_ONCE(!st->should_run))
1056f8b7530aSNeeraj Upadhyay return;
1057f8b7530aSNeeraj Upadhyay
10584cb28cedSThomas Gleixner /*
10594dddfb5fSPeter Zijlstra * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
10604dddfb5fSPeter Zijlstra * that if we see ->should_run we also see the rest of the state.
10614cb28cedSThomas Gleixner */
10624cb28cedSThomas Gleixner smp_mb();
10634dddfb5fSPeter Zijlstra
1064cb92173dSPeter Zijlstra /*
1065cb92173dSPeter Zijlstra * The BP holds the hotplug lock, but we're now running on the AP,
1066cb92173dSPeter Zijlstra * ensure that anybody asserting that the lock is held will actually
1067cb92173dSPeter Zijlstra * find it so.
1068cb92173dSPeter Zijlstra */
1069cb92173dSPeter Zijlstra lockdep_acquire_cpus_lock();
10705f4b55e1SPeter Zijlstra cpuhp_lock_acquire(bringup);
10714dddfb5fSPeter Zijlstra
1072a724632cSThomas Gleixner if (st->single) {
10734dddfb5fSPeter Zijlstra state = st->cb_state;
10744dddfb5fSPeter Zijlstra st->should_run = false;
10754dddfb5fSPeter Zijlstra } else {
1076453e4108SVincent Donnefort st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1077453e4108SVincent Donnefort if (!st->should_run)
1078453e4108SVincent Donnefort goto end;
10794dddfb5fSPeter Zijlstra }
10804dddfb5fSPeter Zijlstra
10814dddfb5fSPeter Zijlstra WARN_ON_ONCE(!cpuhp_is_ap_state(state));
10824dddfb5fSPeter Zijlstra
10834dddfb5fSPeter Zijlstra if (cpuhp_is_atomic_state(state)) {
10844cb28cedSThomas Gleixner local_irq_disable();
10854dddfb5fSPeter Zijlstra st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
10864cb28cedSThomas Gleixner local_irq_enable();
10873b9d6da6SSebastian Andrzej Siewior
10884dddfb5fSPeter Zijlstra /*
10894dddfb5fSPeter Zijlstra * STARTING/DYING must not fail!
10904dddfb5fSPeter Zijlstra */
10914dddfb5fSPeter Zijlstra WARN_ON_ONCE(st->result);
10924cb28cedSThomas Gleixner } else {
10934dddfb5fSPeter Zijlstra st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
10944cb28cedSThomas Gleixner }
10954dddfb5fSPeter Zijlstra
10964dddfb5fSPeter Zijlstra if (st->result) {
10974dddfb5fSPeter Zijlstra /*
10984dddfb5fSPeter Zijlstra * If we fail on a rollback, we're up a creek without a
10994dddfb5fSPeter Zijlstra * paddle: no way forward, no way back. We lose, thanks for
11004dddfb5fSPeter Zijlstra * playing.
11014dddfb5fSPeter Zijlstra */
11024dddfb5fSPeter Zijlstra WARN_ON_ONCE(st->rollback);
11034dddfb5fSPeter Zijlstra st->should_run = false;
11044dddfb5fSPeter Zijlstra }
11054dddfb5fSPeter Zijlstra
1106453e4108SVincent Donnefort end:
11075f4b55e1SPeter Zijlstra cpuhp_lock_release(bringup);
1108cb92173dSPeter Zijlstra lockdep_release_cpus_lock();
11094dddfb5fSPeter Zijlstra
11104dddfb5fSPeter Zijlstra if (!st->should_run)
11115ebe7742SPeter Zijlstra complete_ap_thread(st, bringup);
11124cb28cedSThomas Gleixner }
11134cb28cedSThomas Gleixner
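/*
 * A minimal, illustrative sketch of how AP states driven by the thread above
 * are typically registered by a client.  The "mydrv" names are made up for
 * the example; the registration entry point is cpuhp_setup_state():
 *
 *	#include <linux/cpuhotplug.h>
 *
 *	static int mydrv_online(unsigned int cpu)
 *	{
 *		// Runs on @cpu in its hotplug thread while it walks up.
 *		return 0;
 *	}
 *
 *	static int mydrv_offline(unsigned int cpu)
 *	{
 *		// Runs on @cpu in its hotplug thread while it walks down.
 *		return 0;
 *	}
 *
 *	static int __init mydrv_init(void)
 *	{
 *		int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *					      mydrv_online, mydrv_offline);
 *
 *		// With CPUHP_AP_ONLINE_DYN a positive dynamic state is returned.
 *		return state < 0 ? state : 0;
 *	}
 */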
11144cb28cedSThomas Gleixner /* Invoke a single callback on a remote cpu */
1115a724632cSThomas Gleixner static int
1116cf392d10SThomas Gleixner cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1117cf392d10SThomas Gleixner struct hlist_node *node)
11184cb28cedSThomas Gleixner {
11194cb28cedSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11204dddfb5fSPeter Zijlstra int ret;
11214cb28cedSThomas Gleixner
11224cb28cedSThomas Gleixner if (!cpu_online(cpu))
11234cb28cedSThomas Gleixner return 0;
11244cb28cedSThomas Gleixner
11255f4b55e1SPeter Zijlstra cpuhp_lock_acquire(false);
11265f4b55e1SPeter Zijlstra cpuhp_lock_release(false);
11275f4b55e1SPeter Zijlstra
11285f4b55e1SPeter Zijlstra cpuhp_lock_acquire(true);
11295f4b55e1SPeter Zijlstra cpuhp_lock_release(true);
113049dfe2a6SThomas Gleixner
11316a4e2451SThomas Gleixner /*
11326a4e2451SThomas Gleixner * If we are up and running, use the hotplug thread. For early calls
11336a4e2451SThomas Gleixner * we invoke the thread function directly.
11346a4e2451SThomas Gleixner */
11356a4e2451SThomas Gleixner if (!st->thread)
113696abb968SPeter Zijlstra return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
11376a4e2451SThomas Gleixner
11384dddfb5fSPeter Zijlstra st->rollback = false;
11394dddfb5fSPeter Zijlstra st->last = NULL;
11404dddfb5fSPeter Zijlstra
11414dddfb5fSPeter Zijlstra st->node = node;
11424dddfb5fSPeter Zijlstra st->bringup = bringup;
11434cb28cedSThomas Gleixner st->cb_state = state;
1144a724632cSThomas Gleixner st->single = true;
11454dddfb5fSPeter Zijlstra
11464dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
1147a724632cSThomas Gleixner
11484cb28cedSThomas Gleixner /*
11494dddfb5fSPeter Zijlstra * If we failed and did a partial, do a rollback.
11504cb28cedSThomas Gleixner */
11514dddfb5fSPeter Zijlstra if ((ret = st->result) && st->last) {
11524dddfb5fSPeter Zijlstra st->rollback = true;
11534dddfb5fSPeter Zijlstra st->bringup = !bringup;
11544dddfb5fSPeter Zijlstra
11554dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
11564cb28cedSThomas Gleixner }
11574cb28cedSThomas Gleixner
11581f7c70d6SThomas Gleixner /*
11591f7c70d6SThomas Gleixner * Clean up the leftovers so the next hotplug operation won't use stale
11601f7c70d6SThomas Gleixner * data.
11611f7c70d6SThomas Gleixner */
11621f7c70d6SThomas Gleixner st->node = st->last = NULL;
11634dddfb5fSPeter Zijlstra return ret;
11641cf4f629SThomas Gleixner }
11651cf4f629SThomas Gleixner
11661cf4f629SThomas Gleixner static int cpuhp_kick_ap_work(unsigned int cpu)
11671cf4f629SThomas Gleixner {
11681cf4f629SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11694dddfb5fSPeter Zijlstra enum cpuhp_state prev_state = st->state;
11704dddfb5fSPeter Zijlstra int ret;
11711cf4f629SThomas Gleixner
11725f4b55e1SPeter Zijlstra cpuhp_lock_acquire(false);
11735f4b55e1SPeter Zijlstra cpuhp_lock_release(false);
11745f4b55e1SPeter Zijlstra
11755f4b55e1SPeter Zijlstra cpuhp_lock_acquire(true);
11765f4b55e1SPeter Zijlstra cpuhp_lock_release(true);
11774dddfb5fSPeter Zijlstra
11784dddfb5fSPeter Zijlstra trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1179b7ba6d8dSSteven Price ret = cpuhp_kick_ap(cpu, st, st->target);
11804dddfb5fSPeter Zijlstra trace_cpuhp_exit(cpu, st->state, prev_state, ret);
11814dddfb5fSPeter Zijlstra
11824dddfb5fSPeter Zijlstra return ret;
11834cb28cedSThomas Gleixner }
11844cb28cedSThomas Gleixner
11854cb28cedSThomas Gleixner static struct smp_hotplug_thread cpuhp_threads = {
11864cb28cedSThomas Gleixner .store = &cpuhp_state.thread,
11874cb28cedSThomas Gleixner .thread_should_run = cpuhp_should_run,
11884cb28cedSThomas Gleixner .thread_fn = cpuhp_thread_fun,
11894cb28cedSThomas Gleixner .thread_comm = "cpuhp/%u",
11904cb28cedSThomas Gleixner .selfparking = true,
11914cb28cedSThomas Gleixner };
11924cb28cedSThomas Gleixner
1193d308077eSSteven Price static __init void cpuhp_init_state(void)
1194d308077eSSteven Price {
1195d308077eSSteven Price struct cpuhp_cpu_state *st;
1196d308077eSSteven Price int cpu;
1197d308077eSSteven Price
1198d308077eSSteven Price for_each_possible_cpu(cpu) {
1199d308077eSSteven Price st = per_cpu_ptr(&cpuhp_state, cpu);
1200d308077eSSteven Price init_completion(&st->done_up);
1201d308077eSSteven Price init_completion(&st->done_down);
1202d308077eSSteven Price }
1203d308077eSSteven Price }
1204d308077eSSteven Price
12054cb28cedSThomas Gleixner void __init cpuhp_threads_init(void)
12064cb28cedSThomas Gleixner {
1207d308077eSSteven Price cpuhp_init_state();
12084cb28cedSThomas Gleixner BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
12094cb28cedSThomas Gleixner kthread_unpark(this_cpu_read(cpuhp_state.thread));
12104cb28cedSThomas Gleixner }
12114cb28cedSThomas Gleixner
1212b22afcdfSThomas Gleixner /*
1213b22afcdfSThomas Gleixner *
1214b22afcdfSThomas Gleixner * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
1215b22afcdfSThomas Gleixner * protected region.
1216b22afcdfSThomas Gleixner *
1217b22afcdfSThomas Gleixner * The operation is still serialized against concurrent CPU hotplug via
1218b22afcdfSThomas Gleixner * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
1219b22afcdfSThomas Gleixner * serialized against other hotplug related activity like adding or
1220b22afcdfSThomas Gleixner * removing of state callbacks and state instances, which invoke either the
1221b22afcdfSThomas Gleixner * startup or the teardown callback of the affected state.
1222b22afcdfSThomas Gleixner *
1223b22afcdfSThomas Gleixner * This is required for subsystems which are unfixable vs. CPU hotplug and
1224b22afcdfSThomas Gleixner * evade lock inversion problems by scheduling work which has to be
1225b22afcdfSThomas Gleixner * completed _before_ cpu_up()/_cpu_down() returns.
1226b22afcdfSThomas Gleixner *
1227b22afcdfSThomas Gleixner * Don't even think about adding anything to this for any new code or even
1228b22afcdfSThomas Gleixner * drivers. Its only purpose is to keep existing lock order trainwrecks
1229b22afcdfSThomas Gleixner * working.
1230b22afcdfSThomas Gleixner *
1231b22afcdfSThomas Gleixner * For cpu_down() there might be valid reasons to finish cleanups which are
1232b22afcdfSThomas Gleixner * not required to be done under cpu_hotplug_lock, but that's a different
1233b22afcdfSThomas Gleixner * story and would be not invoked via this.
1234b22afcdfSThomas Gleixner */
1235b22afcdfSThomas Gleixner static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
1236b22afcdfSThomas Gleixner {
1237b22afcdfSThomas Gleixner /*
1238b22afcdfSThomas Gleixner * cpusets delegate hotplug operations to a worker to "solve" the
1239b22afcdfSThomas Gleixner * lock order problems. Wait for the worker, but only if tasks are
1240b22afcdfSThomas Gleixner * _not_ frozen (suspend, hibernate) as that would wait forever.
1241b22afcdfSThomas Gleixner *
1242b22afcdfSThomas Gleixner * The wait is required because otherwise the hotplug operation
1243b22afcdfSThomas Gleixner * returns with inconsistent state, which could even be observed in
1244b22afcdfSThomas Gleixner * user space when a new CPU is brought up. The CPU plug uevent
1245b22afcdfSThomas Gleixner * would be delivered and user space reacting on it would fail to
1246b22afcdfSThomas Gleixner * move tasks to the newly plugged CPU up to the point where the
1247b22afcdfSThomas Gleixner * work has finished because up to that point the newly plugged CPU
1248b22afcdfSThomas Gleixner * is not assignable in cpusets/cgroups. On unplug that's not
1249b22afcdfSThomas Gleixner * necessarily a visible issue, but it is still inconsistent state,
1250b22afcdfSThomas Gleixner * which is the real problem which needs to be "fixed". This can't
1251b22afcdfSThomas Gleixner * prevent the transient state between scheduling the work and
1252b22afcdfSThomas Gleixner * returning from waiting for it.
1253b22afcdfSThomas Gleixner */
1254b22afcdfSThomas Gleixner if (!tasks_frozen)
1255b22afcdfSThomas Gleixner cpuset_wait_for_hotplug();
1256b22afcdfSThomas Gleixner }
1257b22afcdfSThomas Gleixner
1258777c6e0dSMichal Hocko #ifdef CONFIG_HOTPLUG_CPU
12598ff00399SNicholas Piggin #ifndef arch_clear_mm_cpumask_cpu
12608ff00399SNicholas Piggin #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
12618ff00399SNicholas Piggin #endif
12628ff00399SNicholas Piggin
1263e4cc2f87SAnton Vorontsov /**
1264e4cc2f87SAnton Vorontsov * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1265e4cc2f87SAnton Vorontsov * @cpu: a CPU id
1266e4cc2f87SAnton Vorontsov *
1267e4cc2f87SAnton Vorontsov * This function walks all processes, finds a valid mm struct for each one and
1268e4cc2f87SAnton Vorontsov * then clears a corresponding bit in mm's cpumask. While this all sounds
1269e4cc2f87SAnton Vorontsov * trivial, there are various non-obvious corner cases, which this function
1270e4cc2f87SAnton Vorontsov * tries to solve in a safe manner.
1271e4cc2f87SAnton Vorontsov *
1272e4cc2f87SAnton Vorontsov * Also note that the function uses a somewhat relaxed locking scheme, so it may
1273e4cc2f87SAnton Vorontsov * be called only for an already offlined CPU.
1274e4cc2f87SAnton Vorontsov */
1275cb79295eSAnton Vorontsov void clear_tasks_mm_cpumask(int cpu)
1276cb79295eSAnton Vorontsov {
1277cb79295eSAnton Vorontsov struct task_struct *p;
1278cb79295eSAnton Vorontsov
1279cb79295eSAnton Vorontsov /*
1280cb79295eSAnton Vorontsov * This function is called after the cpu is taken down and marked
1281cb79295eSAnton Vorontsov * offline, so it's not like new tasks will ever get this cpu set in
1282cb79295eSAnton Vorontsov * their mm mask. -- Peter Zijlstra
1283cb79295eSAnton Vorontsov * Thus, we may use rcu_read_lock() here, instead of grabbing
1284cb79295eSAnton Vorontsov * full-fledged tasklist_lock.
1285cb79295eSAnton Vorontsov */
1286e4cc2f87SAnton Vorontsov WARN_ON(cpu_online(cpu));
1287cb79295eSAnton Vorontsov rcu_read_lock();
1288cb79295eSAnton Vorontsov for_each_process(p) {
1289cb79295eSAnton Vorontsov struct task_struct *t;
1290cb79295eSAnton Vorontsov
1291e4cc2f87SAnton Vorontsov /*
1292e4cc2f87SAnton Vorontsov * Main thread might exit, but other threads may still have
1293e4cc2f87SAnton Vorontsov * a valid mm. Find one.
1294e4cc2f87SAnton Vorontsov */
1295cb79295eSAnton Vorontsov t = find_lock_task_mm(p);
1296cb79295eSAnton Vorontsov if (!t)
1297cb79295eSAnton Vorontsov continue;
12988ff00399SNicholas Piggin arch_clear_mm_cpumask_cpu(cpu, t->mm);
1299cb79295eSAnton Vorontsov task_unlock(t);
1300cb79295eSAnton Vorontsov }
1301cb79295eSAnton Vorontsov rcu_read_unlock();
1302cb79295eSAnton Vorontsov }
1303cb79295eSAnton Vorontsov
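/*
 * A rough, illustrative sketch of the expected caller: architecture CPU
 * teardown code, after the CPU has been marked offline.  The function name
 * and the surrounding steps are hypothetical and vary per architecture:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		set_cpu_online(cpu, false);
 *		// ... migrate interrupts away, flush caches/TLBs ...
 *		clear_tasks_mm_cpumask(cpu);
 *		return 0;
 *	}
 */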
13041da177e4SLinus Torvalds /* Take this CPU down. */
130571cf5aeeSMathias Krause static int take_cpu_down(void *_param)
13061da177e4SLinus Torvalds {
13074baa0afcSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
13084baa0afcSThomas Gleixner enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1309090e77c3SThomas Gleixner int err, cpu = smp_processor_id();
13101da177e4SLinus Torvalds
13111da177e4SLinus Torvalds /* Ensure this CPU doesn't handle any more interrupts. */
13121da177e4SLinus Torvalds err = __cpu_disable();
13131da177e4SLinus Torvalds if (err < 0)
1314f3705136SZwane Mwaikambo return err;
1315f3705136SZwane Mwaikambo
1316a724632cSThomas Gleixner /*
1317453e4108SVincent Donnefort * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1318453e4108SVincent Donnefort * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1319a724632cSThomas Gleixner */
1320453e4108SVincent Donnefort WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1321453e4108SVincent Donnefort
1322724a8688SPeter Zijlstra /*
13236f855b39SVincent Donnefort * Invoke the former CPU_DYING callbacks. DYING must not fail!
1324724a8688SPeter Zijlstra */
13256f855b39SVincent Donnefort cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
13264baa0afcSThomas Gleixner
132752c063d1SThomas Gleixner /* Give up timekeeping duties */
132852c063d1SThomas Gleixner tick_handover_do_timer();
13291b72d432SThomas Gleixner /* Remove CPU from timer broadcasting */
13301b72d432SThomas Gleixner tick_offline_cpu(cpu);
133114e568e7SThomas Gleixner /* Park the stopper thread */
1332090e77c3SThomas Gleixner stop_machine_park(cpu);
1333f3705136SZwane Mwaikambo return 0;
13341da177e4SLinus Torvalds }
13351da177e4SLinus Torvalds
133698458172SThomas Gleixner static int takedown_cpu(unsigned int cpu)
13371da177e4SLinus Torvalds {
1338e69aab13SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
133998458172SThomas Gleixner int err;
13401da177e4SLinus Torvalds
13412a58c527SThomas Gleixner /* Park the smpboot threads */
134213070833SYuan ZhaoXiong kthread_park(st->thread);
13431cf4f629SThomas Gleixner
13446acce3efSPeter Zijlstra /*
1345a8994181SThomas Gleixner * Prevent irq alloc/free while the dying cpu reorganizes the
1346a8994181SThomas Gleixner * interrupt affinities.
1347a8994181SThomas Gleixner */
1348a8994181SThomas Gleixner irq_lock_sparse();
1349a8994181SThomas Gleixner
1350a8994181SThomas Gleixner /*
13516acce3efSPeter Zijlstra * So now all preempt/rcu users must observe !cpu_active().
13526acce3efSPeter Zijlstra */
1353210e2133SSebastian Andrzej Siewior err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
135404321587SRusty Russell if (err) {
13553b9d6da6SSebastian Andrzej Siewior /* CPU refused to die */
1356a8994181SThomas Gleixner irq_unlock_sparse();
13573b9d6da6SSebastian Andrzej Siewior /* Unpark the hotplug thread so we can rollback there */
135813070833SYuan ZhaoXiong kthread_unpark(st->thread);
135998458172SThomas Gleixner return err;
13601da177e4SLinus Torvalds }
136104321587SRusty Russell BUG_ON(cpu_online(cpu));
13621da177e4SLinus Torvalds
136348c5ccaeSPeter Zijlstra /*
13645b1ead68SBrendan Jackman * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
13655b1ead68SBrendan Jackman * all runnable tasks from the CPU, there's only the idle task left now
136648c5ccaeSPeter Zijlstra * that the migration thread is done doing the stop_machine thing.
136751a96c77SPeter Zijlstra *
136851a96c77SPeter Zijlstra * Wait for the stop thread to go away.
136948c5ccaeSPeter Zijlstra */
13705ebe7742SPeter Zijlstra wait_for_ap_thread(st, false);
1371e69aab13SThomas Gleixner BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
13721da177e4SLinus Torvalds
1373a8994181SThomas Gleixner /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1374a8994181SThomas Gleixner irq_unlock_sparse();
1375a8994181SThomas Gleixner
1376345527b1SPreeti U Murthy hotplug_cpu__broadcast_tick_pull(cpu);
13771da177e4SLinus Torvalds /* This actually kills the CPU. */
13781da177e4SLinus Torvalds __cpu_die(cpu);
13791da177e4SLinus Torvalds
13806f062123SThomas Gleixner cpuhp_bp_sync_dead(cpu);
13816f062123SThomas Gleixner
1382a49b116dSThomas Gleixner tick_cleanup_dead_cpu(cpu);
1383a58163d8SPaul E. McKenney rcutree_migrate_callbacks(cpu);
138498458172SThomas Gleixner return 0;
138598458172SThomas Gleixner }
13861da177e4SLinus Torvalds
138771f87b2fSThomas Gleixner static void cpuhp_complete_idle_dead(void *arg)
138871f87b2fSThomas Gleixner {
138971f87b2fSThomas Gleixner struct cpuhp_cpu_state *st = arg;
139071f87b2fSThomas Gleixner
13915ebe7742SPeter Zijlstra complete_ap_thread(st, false);
139271f87b2fSThomas Gleixner }
139371f87b2fSThomas Gleixner
1394e69aab13SThomas Gleixner void cpuhp_report_idle_dead(void)
1395e69aab13SThomas Gleixner {
1396e69aab13SThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1397e69aab13SThomas Gleixner
1398e69aab13SThomas Gleixner BUG_ON(st->state != CPUHP_AP_OFFLINE);
139927d50c7eSThomas Gleixner rcu_report_dead(smp_processor_id());
140071f87b2fSThomas Gleixner st->state = CPUHP_AP_IDLE_DEAD;
140171f87b2fSThomas Gleixner /*
140271f87b2fSThomas Gleixner * We cannot call complete after rcu_report_dead() so we delegate it
140371f87b2fSThomas Gleixner * to an online cpu.
140471f87b2fSThomas Gleixner */
140571f87b2fSThomas Gleixner smp_call_function_single(cpumask_first(cpu_online_mask),
140671f87b2fSThomas Gleixner cpuhp_complete_idle_dead, st, 0);
1407e69aab13SThomas Gleixner }
1408e69aab13SThomas Gleixner
14094dddfb5fSPeter Zijlstra static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
14104dddfb5fSPeter Zijlstra enum cpuhp_state target)
14114dddfb5fSPeter Zijlstra {
14124dddfb5fSPeter Zijlstra enum cpuhp_state prev_state = st->state;
14134dddfb5fSPeter Zijlstra int ret = 0;
14144dddfb5fSPeter Zijlstra
1415453e4108SVincent Donnefort ret = cpuhp_invoke_callback_range(false, cpu, st, target);
14164dddfb5fSPeter Zijlstra if (ret) {
1417ebca71a8SDongli Zhang pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1418ebca71a8SDongli Zhang ret, cpu, cpuhp_get_step(st->state)->name,
1419ebca71a8SDongli Zhang st->state);
1420453e4108SVincent Donnefort
1421b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
1422453e4108SVincent Donnefort
142369fa6eb7SThomas Gleixner if (st->state < prev_state)
1424453e4108SVincent Donnefort WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1425453e4108SVincent Donnefort prev_state));
14264dddfb5fSPeter Zijlstra }
1427453e4108SVincent Donnefort
14284dddfb5fSPeter Zijlstra return ret;
14294dddfb5fSPeter Zijlstra }
1430cff7d378SThomas Gleixner
143198458172SThomas Gleixner /* Requires cpu_add_remove_lock to be held */
1432af1f4045SThomas Gleixner static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1433af1f4045SThomas Gleixner enum cpuhp_state target)
143498458172SThomas Gleixner {
1435cff7d378SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1436cff7d378SThomas Gleixner int prev_state, ret = 0;
143798458172SThomas Gleixner
143898458172SThomas Gleixner if (num_online_cpus() == 1)
143998458172SThomas Gleixner return -EBUSY;
144098458172SThomas Gleixner
1441757c989bSThomas Gleixner if (!cpu_present(cpu))
144298458172SThomas Gleixner return -EINVAL;
144398458172SThomas Gleixner
14448f553c49SThomas Gleixner cpus_write_lock();
144598458172SThomas Gleixner
144698458172SThomas Gleixner cpuhp_tasks_frozen = tasks_frozen;
144798458172SThomas Gleixner
1448b7ba6d8dSSteven Price prev_state = cpuhp_set_state(cpu, st, target);
14491cf4f629SThomas Gleixner /*
14501cf4f629SThomas Gleixner * If the current CPU state is in the range of the AP hotplug thread,
14511cf4f629SThomas Gleixner * then we need to kick the thread.
14521cf4f629SThomas Gleixner */
14538df3e07eSThomas Gleixner if (st->state > CPUHP_TEARDOWN_CPU) {
14544dddfb5fSPeter Zijlstra st->target = max((int)target, CPUHP_TEARDOWN_CPU);
14551cf4f629SThomas Gleixner ret = cpuhp_kick_ap_work(cpu);
14561cf4f629SThomas Gleixner /*
14571cf4f629SThomas Gleixner * The AP side has done the error rollback already. Just
14581cf4f629SThomas Gleixner * return the error code..
14591cf4f629SThomas Gleixner */
14601cf4f629SThomas Gleixner if (ret)
14611cf4f629SThomas Gleixner goto out;
14621cf4f629SThomas Gleixner
14631cf4f629SThomas Gleixner /*
14641cf4f629SThomas Gleixner * We might have stopped still in the range of the AP hotplug
14651cf4f629SThomas Gleixner * thread. Nothing to do anymore.
14661cf4f629SThomas Gleixner */
14678df3e07eSThomas Gleixner if (st->state > CPUHP_TEARDOWN_CPU)
14681cf4f629SThomas Gleixner goto out;
14694dddfb5fSPeter Zijlstra
14704dddfb5fSPeter Zijlstra st->target = target;
14711cf4f629SThomas Gleixner }
14721cf4f629SThomas Gleixner /*
14738df3e07eSThomas Gleixner * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
14741cf4f629SThomas Gleixner * to do the further cleanups.
14751cf4f629SThomas Gleixner */
1476a724632cSThomas Gleixner ret = cpuhp_down_callbacks(cpu, st, target);
147762f25069SVincent Donnefort if (ret && st->state < prev_state) {
147862f25069SVincent Donnefort if (st->state == CPUHP_TEARDOWN_CPU) {
1479b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
14804dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
148162f25069SVincent Donnefort } else {
148262f25069SVincent Donnefort WARN(1, "DEAD callback error for CPU%d", cpu);
148362f25069SVincent Donnefort }
14843b9d6da6SSebastian Andrzej Siewior }
148598458172SThomas Gleixner
14861cf4f629SThomas Gleixner out:
14878f553c49SThomas Gleixner cpus_write_unlock();
1488941154bdSThomas Gleixner /*
1489941154bdSThomas Gleixner * Do post unplug cleanup. This is still protected against
1490941154bdSThomas Gleixner * concurrent CPU hotplug via cpu_add_remove_lock.
1491941154bdSThomas Gleixner */
1492941154bdSThomas Gleixner lockup_detector_cleanup();
1493a74cfffbSThomas Gleixner arch_smt_update();
1494b22afcdfSThomas Gleixner cpu_up_down_serialize_trainwrecks(tasks_frozen);
1495cff7d378SThomas Gleixner return ret;
1496e3920fb4SRafael J. Wysocki }
1497e3920fb4SRafael J. Wysocki
14982b8272ffSThomas Gleixner struct cpu_down_work {
14992b8272ffSThomas Gleixner unsigned int cpu;
15002b8272ffSThomas Gleixner enum cpuhp_state target;
15012b8272ffSThomas Gleixner };
15022b8272ffSThomas Gleixner
15032b8272ffSThomas Gleixner static long __cpu_down_maps_locked(void *arg)
15042b8272ffSThomas Gleixner {
15052b8272ffSThomas Gleixner struct cpu_down_work *work = arg;
15062b8272ffSThomas Gleixner
15072b8272ffSThomas Gleixner return _cpu_down(work->cpu, 0, work->target);
15082b8272ffSThomas Gleixner }
15092b8272ffSThomas Gleixner
1510cc1fe215SThomas Gleixner static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1511cc1fe215SThomas Gleixner {
15122b8272ffSThomas Gleixner struct cpu_down_work work = { .cpu = cpu, .target = target, };
15132b8272ffSThomas Gleixner
1514bae1a962SKuppuswamy Sathyanarayanan /*
1515bae1a962SKuppuswamy Sathyanarayanan * If the platform does not support hotplug, report it explicitly to
1516bae1a962SKuppuswamy Sathyanarayanan * differentiate it from a transient offlining failure.
1517bae1a962SKuppuswamy Sathyanarayanan */
1518bae1a962SKuppuswamy Sathyanarayanan if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
1519bae1a962SKuppuswamy Sathyanarayanan return -EOPNOTSUPP;
1520cc1fe215SThomas Gleixner if (cpu_hotplug_disabled)
1521cc1fe215SThomas Gleixner return -EBUSY;
15222b8272ffSThomas Gleixner
15232b8272ffSThomas Gleixner /*
15242b8272ffSThomas Gleixner * Ensure that the control task does not run on the CPU that is to
15252b8272ffSThomas Gleixner * be offlined, to prevent a deadlock against cfs_b->period_timer.
15263073f6dfSRan Xiaokai * Also keep at least one housekeeping cpu onlined to avoid generating
15273073f6dfSRan Xiaokai * an empty sched_domain span.
15282b8272ffSThomas Gleixner */
15293073f6dfSRan Xiaokai for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
15303073f6dfSRan Xiaokai if (cpu != work.cpu)
15312b8272ffSThomas Gleixner return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
1532cc1fe215SThomas Gleixner }
15333073f6dfSRan Xiaokai return -EBUSY;
15343073f6dfSRan Xiaokai }
1535cc1fe215SThomas Gleixner
153633c3736eSQais Yousef static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1537e3920fb4SRafael J. Wysocki {
15389ea09af3SHeiko Carstens int err;
1539e3920fb4SRafael J. Wysocki
1540d221938cSGautham R Shenoy cpu_maps_update_begin();
1541cc1fe215SThomas Gleixner err = cpu_down_maps_locked(cpu, target);
1542d221938cSGautham R Shenoy cpu_maps_update_done();
15431da177e4SLinus Torvalds return err;
15441da177e4SLinus Torvalds }
15454dddfb5fSPeter Zijlstra
154633c3736eSQais Yousef /**
154733c3736eSQais Yousef * cpu_device_down - Bring down a cpu device
154833c3736eSQais Yousef * @dev: Pointer to the cpu device to offline
154933c3736eSQais Yousef *
155033c3736eSQais Yousef * This function is meant to be used by device core cpu subsystem only.
155133c3736eSQais Yousef *
155233c3736eSQais Yousef * Other subsystems should use remove_cpu() instead.
155311bc021dSRandy Dunlap *
155411bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
155533c3736eSQais Yousef */
155633c3736eSQais Yousef int cpu_device_down(struct device *dev)
1557af1f4045SThomas Gleixner {
155833c3736eSQais Yousef return cpu_down(dev->id, CPUHP_OFFLINE);
1559af1f4045SThomas Gleixner }
15604dddfb5fSPeter Zijlstra
156193ef1429SQais Yousef int remove_cpu(unsigned int cpu)
156293ef1429SQais Yousef {
156393ef1429SQais Yousef int ret;
156493ef1429SQais Yousef
156593ef1429SQais Yousef lock_device_hotplug();
156693ef1429SQais Yousef ret = device_offline(get_cpu_device(cpu));
156793ef1429SQais Yousef unlock_device_hotplug();
156893ef1429SQais Yousef
156993ef1429SQais Yousef return ret;
157093ef1429SQais Yousef }
157193ef1429SQais Yousef EXPORT_SYMBOL_GPL(remove_cpu);
157293ef1429SQais Yousef
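/*
 * A minimal usage sketch for the wrappers meant for other subsystems
 * (illustrative only; the function name is made up and error handling is
 * reduced to the bare minimum):
 *
 *	static int park_cpu_and_bring_it_back(unsigned int cpu)
 *	{
 *		int ret = remove_cpu(cpu);	// offline via device_offline()
 *
 *		if (ret)
 *			return ret;
 *		// ... the CPU is now offline and can be used exclusively ...
 *		return add_cpu(cpu);		// online it again
 *	}
 */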
15730441a559SQais Yousef void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
15740441a559SQais Yousef {
15750441a559SQais Yousef unsigned int cpu;
15760441a559SQais Yousef int error;
15770441a559SQais Yousef
15780441a559SQais Yousef cpu_maps_update_begin();
15790441a559SQais Yousef
15800441a559SQais Yousef /*
15810441a559SQais Yousef * Make certain the cpu I'm about to reboot on is online.
15820441a559SQais Yousef *
15830441a559SQais Yousef * This is in line with what migrate_to_reboot_cpu() already does.
15840441a559SQais Yousef */
15850441a559SQais Yousef if (!cpu_online(primary_cpu))
15860441a559SQais Yousef primary_cpu = cpumask_first(cpu_online_mask);
15870441a559SQais Yousef
15880441a559SQais Yousef for_each_online_cpu(cpu) {
15890441a559SQais Yousef if (cpu == primary_cpu)
15900441a559SQais Yousef continue;
15910441a559SQais Yousef
15920441a559SQais Yousef error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
15930441a559SQais Yousef if (error) {
15940441a559SQais Yousef pr_err("Failed to offline CPU%d - error=%d",
15950441a559SQais Yousef cpu, error);
15960441a559SQais Yousef break;
15970441a559SQais Yousef }
15980441a559SQais Yousef }
15990441a559SQais Yousef
16000441a559SQais Yousef /*
16010441a559SQais Yousef * Ensure all but the reboot CPU are offline.
16020441a559SQais Yousef */
16030441a559SQais Yousef BUG_ON(num_online_cpus() > 1);
16040441a559SQais Yousef
16050441a559SQais Yousef /*
16060441a559SQais Yousef * Make sure the CPUs won't be enabled by someone else after this
16070441a559SQais Yousef * point. Kexec will reboot to a new kernel shortly resetting
16080441a559SQais Yousef * everything along the way.
16090441a559SQais Yousef */
16100441a559SQais Yousef cpu_hotplug_disabled++;
16110441a559SQais Yousef
16120441a559SQais Yousef cpu_maps_update_done();
16130441a559SQais Yousef }
16144dddfb5fSPeter Zijlstra
16154dddfb5fSPeter Zijlstra #else
16164dddfb5fSPeter Zijlstra #define takedown_cpu NULL
16171da177e4SLinus Torvalds #endif /*CONFIG_HOTPLUG_CPU*/
16181da177e4SLinus Torvalds
16194baa0afcSThomas Gleixner /**
1620ee1e714bSThomas Gleixner * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
16214baa0afcSThomas Gleixner * @cpu: cpu that just started
16224baa0afcSThomas Gleixner *
16234baa0afcSThomas Gleixner * It must be called by the arch code on the new cpu, before the new cpu
16244baa0afcSThomas Gleixner * enables interrupts and before the "boot" cpu returns from __cpu_up().
16254baa0afcSThomas Gleixner */
16264baa0afcSThomas Gleixner void notify_cpu_starting(unsigned int cpu)
16274baa0afcSThomas Gleixner {
16284baa0afcSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
16294baa0afcSThomas Gleixner enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
16304baa0afcSThomas Gleixner
16310c6d4576SSebastian Andrzej Siewior rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1632e797bda3SThomas Gleixner cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1633453e4108SVincent Donnefort
1634724a8688SPeter Zijlstra /*
1635724a8688SPeter Zijlstra * STARTING must not fail!
1636724a8688SPeter Zijlstra */
16376f855b39SVincent Donnefort cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
16384baa0afcSThomas Gleixner }
16394baa0afcSThomas Gleixner
1640949338e3SThomas Gleixner /*
16419cd4f1a4SThomas Gleixner * Called from the idle task. Wake up the controlling task which brings the
164245178ac0SPeter Zijlstra * hotplug thread of the upcoming CPU up and then delegates the rest of the
164345178ac0SPeter Zijlstra * online bringup to the hotplug thread.
1644949338e3SThomas Gleixner */
16458df3e07eSThomas Gleixner void cpuhp_online_idle(enum cpuhp_state state)
1646949338e3SThomas Gleixner {
16478df3e07eSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
16488df3e07eSThomas Gleixner
16498df3e07eSThomas Gleixner /* Happens for the boot cpu */
16508df3e07eSThomas Gleixner if (state != CPUHP_AP_ONLINE_IDLE)
16518df3e07eSThomas Gleixner return;
16528df3e07eSThomas Gleixner
16536f062123SThomas Gleixner cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
16546f062123SThomas Gleixner
165545178ac0SPeter Zijlstra /*
16566f062123SThomas Gleixner * Unpark the stopper thread before we start the idle loop (and start
165745178ac0SPeter Zijlstra * scheduling); this ensures the stopper task is always available.
165845178ac0SPeter Zijlstra */
165945178ac0SPeter Zijlstra stop_machine_unpark(smp_processor_id());
166045178ac0SPeter Zijlstra
16618df3e07eSThomas Gleixner st->state = CPUHP_AP_ONLINE_IDLE;
16625ebe7742SPeter Zijlstra complete_ap_thread(st, true);
1663949338e3SThomas Gleixner }
1664949338e3SThomas Gleixner
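/*
 * How notify_cpu_starting() and cpuhp_online_idle() fit together, as a
 * rough, illustrative sketch of an architecture's secondary startup path
 * (function names and the exact ordering vary per architecture):
 *
 *	void secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		// low level init: MMU, per-cpu areas, timers, ...
 *		notify_cpu_starting(cpu);	// STARTING callbacks, irqs still off
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *		// cpu_startup_entry() invokes cpuhp_online_idle(), which signals
 *		// the waiting control CPU so the hotplug thread can finish the
 *		// remaining ONLINE callbacks.
 *	}
 */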
1665e3920fb4SRafael J. Wysocki /* Requires cpu_add_remove_lock to be held */
1666af1f4045SThomas Gleixner static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
16671da177e4SLinus Torvalds {
1668cff7d378SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
16693bb5d2eeSSuresh Siddha struct task_struct *idle;
16702e1a3483SThomas Gleixner int ret = 0;
16711da177e4SLinus Torvalds
16728f553c49SThomas Gleixner cpus_write_lock();
167338498a67SThomas Gleixner
1674757c989bSThomas Gleixner if (!cpu_present(cpu)) {
16755e5041f3SYasuaki Ishimatsu ret = -EINVAL;
16765e5041f3SYasuaki Ishimatsu goto out;
16775e5041f3SYasuaki Ishimatsu }
16785e5041f3SYasuaki Ishimatsu
1679757c989bSThomas Gleixner /*
168033c3736eSQais Yousef * The caller of cpu_up() might have raced with another
168133c3736eSQais Yousef * caller. Nothing to do.
1682757c989bSThomas Gleixner */
1683757c989bSThomas Gleixner if (st->state >= target)
1684757c989bSThomas Gleixner goto out;
1685757c989bSThomas Gleixner
1686757c989bSThomas Gleixner if (st->state == CPUHP_OFFLINE) {
1687cff7d378SThomas Gleixner /* Let it fail before we try to bring the cpu up */
16883bb5d2eeSSuresh Siddha idle = idle_thread_get(cpu);
16893bb5d2eeSSuresh Siddha if (IS_ERR(idle)) {
16903bb5d2eeSSuresh Siddha ret = PTR_ERR(idle);
169138498a67SThomas Gleixner goto out;
16923bb5d2eeSSuresh Siddha }
16936d712b9bSDavid Woodhouse
16946d712b9bSDavid Woodhouse /*
16956d712b9bSDavid Woodhouse * Reset stale stack state from the last time this CPU was online.
16966d712b9bSDavid Woodhouse */
16976d712b9bSDavid Woodhouse scs_task_reset(idle);
16986d712b9bSDavid Woodhouse kasan_unpoison_task_stack(idle);
1699757c989bSThomas Gleixner }
170038498a67SThomas Gleixner
1701ba997462SThomas Gleixner cpuhp_tasks_frozen = tasks_frozen;
1702ba997462SThomas Gleixner
1703b7ba6d8dSSteven Price cpuhp_set_state(cpu, st, target);
17041cf4f629SThomas Gleixner /*
17051cf4f629SThomas Gleixner * If the current CPU state is in the range of the AP hotplug thread,
17061cf4f629SThomas Gleixner * then we need to kick the thread once more.
17071cf4f629SThomas Gleixner */
17088df3e07eSThomas Gleixner if (st->state > CPUHP_BRINGUP_CPU) {
17091cf4f629SThomas Gleixner ret = cpuhp_kick_ap_work(cpu);
17101cf4f629SThomas Gleixner /*
17111cf4f629SThomas Gleixner * The AP side has done the error rollback already. Just
17121cf4f629SThomas Gleixner * return the error code..
17131cf4f629SThomas Gleixner */
17141cf4f629SThomas Gleixner if (ret)
17151cf4f629SThomas Gleixner goto out;
17161cf4f629SThomas Gleixner }
17171cf4f629SThomas Gleixner
17181cf4f629SThomas Gleixner /*
17191cf4f629SThomas Gleixner * Try to reach the target state. We max out on the BP at
17208df3e07eSThomas Gleixner * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
17211cf4f629SThomas Gleixner * responsible for bringing it up to the target state.
17221cf4f629SThomas Gleixner */
17238df3e07eSThomas Gleixner target = min((int)target, CPUHP_BRINGUP_CPU);
1724a724632cSThomas Gleixner ret = cpuhp_up_callbacks(cpu, st, target);
172538498a67SThomas Gleixner out:
17268f553c49SThomas Gleixner cpus_write_unlock();
1727a74cfffbSThomas Gleixner arch_smt_update();
1728b22afcdfSThomas Gleixner cpu_up_down_serialize_trainwrecks(tasks_frozen);
17291da177e4SLinus Torvalds return ret;
17301da177e4SLinus Torvalds }
1731e3920fb4SRafael J. Wysocki
173233c3736eSQais Yousef static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1733e3920fb4SRafael J. Wysocki {
1734e3920fb4SRafael J. Wysocki int err = 0;
1735cf23422bSminskey guo
1736e0b582ecSRusty Russell if (!cpu_possible(cpu)) {
173784117da5SFabian Frederick pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
173884117da5SFabian Frederick cpu);
173987d5e023SChen Gong #if defined(CONFIG_IA64)
174084117da5SFabian Frederick pr_err("please check additional_cpus= boot parameter\n");
174173e753a5SKAMEZAWA Hiroyuki #endif
174273e753a5SKAMEZAWA Hiroyuki return -EINVAL;
174373e753a5SKAMEZAWA Hiroyuki }
1744e3920fb4SRafael J. Wysocki
174501b0f197SToshi Kani err = try_online_node(cpu_to_node(cpu));
1746cf23422bSminskey guo if (err)
1747cf23422bSminskey guo return err;
1748cf23422bSminskey guo
1749d221938cSGautham R Shenoy cpu_maps_update_begin();
1750e761b772SMax Krasnyansky
1751e761b772SMax Krasnyansky if (cpu_hotplug_disabled) {
1752e3920fb4SRafael J. Wysocki err = -EBUSY;
1753e761b772SMax Krasnyansky goto out;
1754e761b772SMax Krasnyansky }
175560edbe8eSThomas Gleixner if (!cpu_bootable(cpu)) {
175605736e4aSThomas Gleixner err = -EPERM;
175705736e4aSThomas Gleixner goto out;
175805736e4aSThomas Gleixner }
1759e761b772SMax Krasnyansky
1760af1f4045SThomas Gleixner err = _cpu_up(cpu, 0, target);
1761e761b772SMax Krasnyansky out:
1762d221938cSGautham R Shenoy cpu_maps_update_done();
1763e3920fb4SRafael J. Wysocki return err;
1764e3920fb4SRafael J. Wysocki }
1765af1f4045SThomas Gleixner
176633c3736eSQais Yousef /**
176733c3736eSQais Yousef * cpu_device_up - Bring up a cpu device
176833c3736eSQais Yousef * @dev: Pointer to the cpu device to online
176933c3736eSQais Yousef *
177033c3736eSQais Yousef * This function is meant to be used by device core cpu subsystem only.
177133c3736eSQais Yousef *
177233c3736eSQais Yousef * Other subsystems should use add_cpu() instead.
177311bc021dSRandy Dunlap *
177411bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
177533c3736eSQais Yousef */
177633c3736eSQais Yousef int cpu_device_up(struct device *dev)
1777af1f4045SThomas Gleixner {
177833c3736eSQais Yousef return cpu_up(dev->id, CPUHP_ONLINE);
1779af1f4045SThomas Gleixner }
1780e3920fb4SRafael J. Wysocki
178193ef1429SQais Yousef int add_cpu(unsigned int cpu)
178293ef1429SQais Yousef {
178393ef1429SQais Yousef int ret;
178493ef1429SQais Yousef
178593ef1429SQais Yousef lock_device_hotplug();
178693ef1429SQais Yousef ret = device_online(get_cpu_device(cpu));
178793ef1429SQais Yousef unlock_device_hotplug();
178893ef1429SQais Yousef
178993ef1429SQais Yousef return ret;
179093ef1429SQais Yousef }
179193ef1429SQais Yousef EXPORT_SYMBOL_GPL(add_cpu);
179293ef1429SQais Yousef
1793d720f986SQais Yousef /**
1794d720f986SQais Yousef * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1795d720f986SQais Yousef * @sleep_cpu: The cpu we hibernated on and should be brought up.
1796d720f986SQais Yousef *
1797d720f986SQais Yousef * On some architectures like arm64, we can hibernate on any CPU, but on
1798d720f986SQais Yousef * wake up the CPU we hibernated on might be offline as a side effect of
1799d720f986SQais Yousef * using maxcpus= for example.
180011bc021dSRandy Dunlap *
180111bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
1802d720f986SQais Yousef */
1803d720f986SQais Yousef int bringup_hibernate_cpu(unsigned int sleep_cpu)
1804d720f986SQais Yousef {
1805d720f986SQais Yousef int ret;
1806d720f986SQais Yousef
1807d720f986SQais Yousef if (!cpu_online(sleep_cpu)) {
1808d720f986SQais Yousef pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
180933c3736eSQais Yousef ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1810d720f986SQais Yousef if (ret) {
1811d720f986SQais Yousef pr_err("Failed to bring hibernate-CPU up!\n");
1812d720f986SQais Yousef return ret;
1813d720f986SQais Yousef }
1814d720f986SQais Yousef }
1815d720f986SQais Yousef return 0;
1816d720f986SQais Yousef }
1817d720f986SQais Yousef
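/*
 * A rough usage sketch (illustrative; "sleep_cpu" here is just a saved CPU
 * number, not a variable in this file): an arch resume path that recorded
 * the CPU it hibernated on would do something like
 *
 *	ret = bringup_hibernate_cpu(sleep_cpu);
 *	if (ret)
 *		return ret;	// abort resume, the CPU could not be onlined
 *
 * before switching back to that CPU and restoring the image.
 */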
181818415f33SThomas Gleixner static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
181918415f33SThomas Gleixner enum cpuhp_state target)
1820b99a2659SQais Yousef {
1821b99a2659SQais Yousef unsigned int cpu;
1822b99a2659SQais Yousef
182318415f33SThomas Gleixner for_each_cpu(cpu, mask) {
182418415f33SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
182518415f33SThomas Gleixner
182618415f33SThomas Gleixner if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
182718415f33SThomas Gleixner /*
182818415f33SThomas Gleixner * If this failed then cpu_up() might have only
182918415f33SThomas Gleixner * rolled back to CPUHP_BP_KICK_AP for the final
183018415f33SThomas Gleixner * online. Clean it up. NOOP if already rolled back.
183118415f33SThomas Gleixner */
183218415f33SThomas Gleixner WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
1833b99a2659SQais Yousef }
183406c6796eSThomas Gleixner
183506c6796eSThomas Gleixner if (!--ncpus)
183606c6796eSThomas Gleixner break;
1837b99a2659SQais Yousef }
183818415f33SThomas Gleixner }
183918415f33SThomas Gleixner
184018415f33SThomas Gleixner #ifdef CONFIG_HOTPLUG_PARALLEL
184118415f33SThomas Gleixner static bool __cpuhp_parallel_bringup __ro_after_init = true;
184218415f33SThomas Gleixner
184318415f33SThomas Gleixner static int __init parallel_bringup_parse_param(char *arg)
184418415f33SThomas Gleixner {
184518415f33SThomas Gleixner return kstrtobool(arg, &__cpuhp_parallel_bringup);
184618415f33SThomas Gleixner }
184718415f33SThomas Gleixner early_param("cpuhp.parallel", parallel_bringup_parse_param);
184818415f33SThomas Gleixner
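/*
 * Boot-time control, as an illustrative note: on architectures that select
 * CONFIG_HOTPLUG_PARALLEL the parallel bringup is on by default and can be
 * switched off from the kernel command line:
 *
 *	cpuhp.parallel=0
 *
 * Whether parallel mode is actually used also depends on
 * arch_cpuhp_init_parallel_bringup() in cpuhp_bringup_cpus_parallel() below.
 */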
18497a4dcb4aSLaurent Dufour static inline bool cpuhp_smt_aware(void)
18507a4dcb4aSLaurent Dufour {
185191b4a7dbSLaurent Dufour return cpu_smt_max_threads > 1;
18527a4dcb4aSLaurent Dufour }
18537a4dcb4aSLaurent Dufour
18547a4dcb4aSLaurent Dufour static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
18557a4dcb4aSLaurent Dufour {
18567a4dcb4aSLaurent Dufour return cpu_primary_thread_mask;
18577a4dcb4aSLaurent Dufour }
18587a4dcb4aSLaurent Dufour
185918415f33SThomas Gleixner /*
186018415f33SThomas Gleixner * On architectures which have enabled parallel bringup this invokes all BP
186118415f33SThomas Gleixner * prepare states for each of the to be onlined APs first. The last state
186218415f33SThomas Gleixner * sends the startup IPI to the APs. The APs proceed through the low level
186318415f33SThomas Gleixner * bringup code in parallel and then wait for the control CPU to release
186418415f33SThomas Gleixner * them one by one for the final onlining procedure.
186518415f33SThomas Gleixner *
186618415f33SThomas Gleixner * This avoids waiting for each AP to respond to the startup IPI in
186718415f33SThomas Gleixner * CPUHP_BRINGUP_CPU.
186818415f33SThomas Gleixner */
186918415f33SThomas Gleixner static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
187018415f33SThomas Gleixner {
187118415f33SThomas Gleixner const struct cpumask *mask = cpu_present_mask;
187218415f33SThomas Gleixner
187318415f33SThomas Gleixner if (__cpuhp_parallel_bringup)
187418415f33SThomas Gleixner __cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
187518415f33SThomas Gleixner if (!__cpuhp_parallel_bringup)
187618415f33SThomas Gleixner return false;
187718415f33SThomas Gleixner
187818415f33SThomas Gleixner if (cpuhp_smt_aware()) {
187918415f33SThomas Gleixner const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
188018415f33SThomas Gleixner static struct cpumask tmp_mask __initdata;
188118415f33SThomas Gleixner
188218415f33SThomas Gleixner /*
188318415f33SThomas Gleixner * For various reasons, X86 requires that SMT siblings are not
188418415f33SThomas Gleixner * brought up while the primary thread does a microcode update.
188518415f33SThomas Gleixner * Bring the primary threads up first.
188618415f33SThomas Gleixner */
188718415f33SThomas Gleixner cpumask_and(&tmp_mask, mask, pmask);
188818415f33SThomas Gleixner cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
188918415f33SThomas Gleixner cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
189018415f33SThomas Gleixner /* Account for the online CPUs */
189118415f33SThomas Gleixner ncpus -= num_online_cpus();
189218415f33SThomas Gleixner if (!ncpus)
189318415f33SThomas Gleixner return true;
189418415f33SThomas Gleixner /* Create the mask for secondary CPUs */
189518415f33SThomas Gleixner cpumask_andnot(&tmp_mask, mask, pmask);
189618415f33SThomas Gleixner mask = &tmp_mask;
189718415f33SThomas Gleixner }
189818415f33SThomas Gleixner
189918415f33SThomas Gleixner /* Bring the not-yet started CPUs up */
190018415f33SThomas Gleixner cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
190118415f33SThomas Gleixner cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
190218415f33SThomas Gleixner return true;
190318415f33SThomas Gleixner }
190418415f33SThomas Gleixner #else
190518415f33SThomas Gleixner static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
190618415f33SThomas Gleixner #endif /* CONFIG_HOTPLUG_PARALLEL */
190718415f33SThomas Gleixner
190818415f33SThomas Gleixner void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
190918415f33SThomas Gleixner {
191069787793SHuacai Chen if (!setup_max_cpus)
191169787793SHuacai Chen return;
191269787793SHuacai Chen
191318415f33SThomas Gleixner /* Try parallel bringup optimization if enabled */
191418415f33SThomas Gleixner if (cpuhp_bringup_cpus_parallel(setup_max_cpus))
191518415f33SThomas Gleixner return;
191618415f33SThomas Gleixner
191718415f33SThomas Gleixner /* Full per CPU serialized bringup */
191818415f33SThomas Gleixner cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE);
191918415f33SThomas Gleixner }
1920e3920fb4SRafael J. Wysocki
1921f3de4be9SRafael J. Wysocki #ifdef CONFIG_PM_SLEEP_SMP
1922e0b582ecSRusty Russell static cpumask_var_t frozen_cpus;
1923e3920fb4SRafael J. Wysocki
1924fb7fb84aSQais Yousef int freeze_secondary_cpus(int primary)
1925e3920fb4SRafael J. Wysocki {
1926d391e552SJames Morse int cpu, error = 0;
1927e3920fb4SRafael J. Wysocki
1928d221938cSGautham R Shenoy cpu_maps_update_begin();
19299ca12ac0SNicholas Piggin if (primary == -1) {
19309ca12ac0SNicholas Piggin primary = cpumask_first(cpu_online_mask);
193104d4e665SFrederic Weisbecker if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
193204d4e665SFrederic Weisbecker primary = housekeeping_any_cpu(HK_TYPE_TIMER);
19339ca12ac0SNicholas Piggin } else {
1934d391e552SJames Morse if (!cpu_online(primary))
1935d391e552SJames Morse primary = cpumask_first(cpu_online_mask);
19369ca12ac0SNicholas Piggin }
19379ca12ac0SNicholas Piggin
19389ee349adSXiaotian Feng /*
19399ee349adSXiaotian Feng * We take down all of the non-boot CPUs in one shot to avoid races
1940e3920fb4SRafael J. Wysocki * with userspace trying to use CPU hotplug at the same time.
1941e3920fb4SRafael J. Wysocki */
1942e0b582ecSRusty Russell cpumask_clear(frozen_cpus);
19436ad4c188SPeter Zijlstra
194484117da5SFabian Frederick pr_info("Disabling non-boot CPUs ...\n");
1945e3920fb4SRafael J. Wysocki for_each_online_cpu(cpu) {
1946d391e552SJames Morse if (cpu == primary)
1947e3920fb4SRafael J. Wysocki continue;
1948a66d955eSPavankumar Kondeti
1949fb7fb84aSQais Yousef if (pm_wakeup_pending()) {
1950a66d955eSPavankumar Kondeti pr_info("Wakeup pending. Abort CPU freeze\n");
1951a66d955eSPavankumar Kondeti error = -EBUSY;
1952a66d955eSPavankumar Kondeti break;
1953a66d955eSPavankumar Kondeti }
1954a66d955eSPavankumar Kondeti
1955bb3632c6STodd E Brandt trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1956af1f4045SThomas Gleixner error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1957bb3632c6STodd E Brandt trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1958feae3203SMike Travis if (!error)
1959e0b582ecSRusty Russell cpumask_set_cpu(cpu, frozen_cpus);
1960feae3203SMike Travis else {
196184117da5SFabian Frederick pr_err("Error taking CPU%d down: %d\n", cpu, error);
1962e3920fb4SRafael J. Wysocki break;
1963e3920fb4SRafael J. Wysocki }
1964e3920fb4SRafael J. Wysocki }
196586886e55SJoseph Cihula
196689af7ba5SVitaly Kuznetsov if (!error)
1967e3920fb4SRafael J. Wysocki BUG_ON(num_online_cpus() > 1);
196889af7ba5SVitaly Kuznetsov else
196984117da5SFabian Frederick pr_err("Non-boot CPUs are not disabled\n");
197089af7ba5SVitaly Kuznetsov
197189af7ba5SVitaly Kuznetsov /*
197289af7ba5SVitaly Kuznetsov * Make sure the CPUs won't be enabled by someone else. We need to do
197356555855SQais Yousef * this even in case of failure as all freeze_secondary_cpus() users are
197456555855SQais Yousef * supposed to do thaw_secondary_cpus() on the failure path.
197589af7ba5SVitaly Kuznetsov */
197689af7ba5SVitaly Kuznetsov cpu_hotplug_disabled++;
197789af7ba5SVitaly Kuznetsov
1978d221938cSGautham R Shenoy cpu_maps_update_done();
1979e3920fb4SRafael J. Wysocki return error;
1980e3920fb4SRafael J. Wysocki }
1981e3920fb4SRafael J. Wysocki
198256555855SQais Yousef void __weak arch_thaw_secondary_cpus_begin(void)
1983d0af9eedSSuresh Siddha {
1984d0af9eedSSuresh Siddha }
1985d0af9eedSSuresh Siddha
198656555855SQais Yousef void __weak arch_thaw_secondary_cpus_end(void)
1987d0af9eedSSuresh Siddha {
1988d0af9eedSSuresh Siddha }
1989d0af9eedSSuresh Siddha
199056555855SQais Yousef void thaw_secondary_cpus(void)
1991e3920fb4SRafael J. Wysocki {
1992e3920fb4SRafael J. Wysocki int cpu, error;
1993e3920fb4SRafael J. Wysocki
1994e3920fb4SRafael J. Wysocki /* Allow everyone to use the CPU hotplug again */
1995d221938cSGautham R Shenoy cpu_maps_update_begin();
199601b41159SLianwei Wang __cpu_hotplug_enable();
1997e0b582ecSRusty Russell if (cpumask_empty(frozen_cpus))
19981d64b9cbSRafael J. Wysocki goto out;
1999e3920fb4SRafael J. Wysocki
200084117da5SFabian Frederick pr_info("Enabling non-boot CPUs ...\n");
2001d0af9eedSSuresh Siddha
200256555855SQais Yousef arch_thaw_secondary_cpus_begin();
2003d0af9eedSSuresh Siddha
2004e0b582ecSRusty Russell for_each_cpu(cpu, frozen_cpus) {
2005bb3632c6STodd E Brandt trace_suspend_resume(TPS("CPU_ON"), cpu, true);
2006af1f4045SThomas Gleixner error = _cpu_up(cpu, 1, CPUHP_ONLINE);
2007bb3632c6STodd E Brandt trace_suspend_resume(TPS("CPU_ON"), cpu, false);
2008e3920fb4SRafael J. Wysocki if (!error) {
200984117da5SFabian Frederick pr_info("CPU%d is up\n", cpu);
2010e3920fb4SRafael J. Wysocki continue;
2011e3920fb4SRafael J. Wysocki }
201284117da5SFabian Frederick pr_warn("Error taking CPU%d up: %d\n", cpu, error);
2013e3920fb4SRafael J. Wysocki }
2014d0af9eedSSuresh Siddha
201556555855SQais Yousef arch_thaw_secondary_cpus_end();
2016d0af9eedSSuresh Siddha
2017e0b582ecSRusty Russell cpumask_clear(frozen_cpus);
20181d64b9cbSRafael J. Wysocki out:
2019d221938cSGautham R Shenoy cpu_maps_update_done();
2020e3920fb4SRafael J. Wysocki }
2021e0b582ecSRusty Russell
2022d7268a31SFenghua Yu static int __init alloc_frozen_cpus(void)
2023e0b582ecSRusty Russell {
2024e0b582ecSRusty Russell if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
2025e0b582ecSRusty Russell return -ENOMEM;
2026e0b582ecSRusty Russell return 0;
2027e0b582ecSRusty Russell }
2028e0b582ecSRusty Russell core_initcall(alloc_frozen_cpus);
202979cfbdfaSSrivatsa S. Bhat
203079cfbdfaSSrivatsa S. Bhat /*
203179cfbdfaSSrivatsa S. Bhat * When callbacks for CPU hotplug notifications are being executed, we must
203279cfbdfaSSrivatsa S. Bhat * ensure that the state of the system with respect to the tasks being frozen
203379cfbdfaSSrivatsa S. Bhat * or not, as reported by the notification, remains unchanged *throughout the
203479cfbdfaSSrivatsa S. Bhat * duration* of the execution of the callbacks.
203579cfbdfaSSrivatsa S. Bhat * Hence we need to prevent the freezer from racing with regular CPU hotplug.
203679cfbdfaSSrivatsa S. Bhat *
203779cfbdfaSSrivatsa S. Bhat * This synchronization is implemented by mutually excluding regular CPU
203879cfbdfaSSrivatsa S. Bhat * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
203979cfbdfaSSrivatsa S. Bhat * Hibernate notifications.
204079cfbdfaSSrivatsa S. Bhat */
204179cfbdfaSSrivatsa S. Bhat static int
204279cfbdfaSSrivatsa S. Bhat cpu_hotplug_pm_callback(struct notifier_block *nb,
204379cfbdfaSSrivatsa S. Bhat unsigned long action, void *ptr)
204479cfbdfaSSrivatsa S. Bhat {
204579cfbdfaSSrivatsa S. Bhat switch (action) {
204679cfbdfaSSrivatsa S. Bhat
204779cfbdfaSSrivatsa S. Bhat case PM_SUSPEND_PREPARE:
204879cfbdfaSSrivatsa S. Bhat case PM_HIBERNATION_PREPARE:
204916e53dbfSSrivatsa S. Bhat cpu_hotplug_disable();
205079cfbdfaSSrivatsa S. Bhat break;
205179cfbdfaSSrivatsa S. Bhat
205279cfbdfaSSrivatsa S. Bhat case PM_POST_SUSPEND:
205379cfbdfaSSrivatsa S. Bhat case PM_POST_HIBERNATION:
205416e53dbfSSrivatsa S. Bhat cpu_hotplug_enable();
205579cfbdfaSSrivatsa S. Bhat break;
205679cfbdfaSSrivatsa S. Bhat
205779cfbdfaSSrivatsa S. Bhat default:
205879cfbdfaSSrivatsa S. Bhat return NOTIFY_DONE;
205979cfbdfaSSrivatsa S. Bhat }
206079cfbdfaSSrivatsa S. Bhat
206179cfbdfaSSrivatsa S. Bhat return NOTIFY_OK;
206279cfbdfaSSrivatsa S. Bhat }
206379cfbdfaSSrivatsa S. Bhat
206479cfbdfaSSrivatsa S. Bhat
2065d7268a31SFenghua Yu static int __init cpu_hotplug_pm_sync_init(void)
206679cfbdfaSSrivatsa S. Bhat {
20676e32d479SFenghua Yu /*
20686e32d479SFenghua Yu * cpu_hotplug_pm_callback has higher priority than x86
20696e32d479SFenghua Yu * bsp_pm_callback which depends on cpu_hotplug_pm_callback
20706e32d479SFenghua Yu * to disable cpu hotplug to avoid cpu hotplug race.
20716e32d479SFenghua Yu */
207279cfbdfaSSrivatsa S. Bhat pm_notifier(cpu_hotplug_pm_callback, 0);
207379cfbdfaSSrivatsa S. Bhat return 0;
207479cfbdfaSSrivatsa S. Bhat }
207579cfbdfaSSrivatsa S. Bhat core_initcall(cpu_hotplug_pm_sync_init);
207679cfbdfaSSrivatsa S. Bhat
2077f3de4be9SRafael J. Wysocki #endif /* CONFIG_PM_SLEEP_SMP */
207868f4f1ecSMax Krasnyansky
20798ce371f9SPeter Zijlstra int __boot_cpu_id;
20808ce371f9SPeter Zijlstra
208168f4f1ecSMax Krasnyansky #endif /* CONFIG_SMP */
2082b8d317d1SMike Travis
2083cff7d378SThomas Gleixner /* Boot processor state steps */
208417a2f1ceSLai Jiangshan static struct cpuhp_step cpuhp_hp_states[] = {
2085cff7d378SThomas Gleixner [CPUHP_OFFLINE] = {
2086cff7d378SThomas Gleixner .name = "offline",
20873c1627e9SThomas Gleixner .startup.single = NULL,
20883c1627e9SThomas Gleixner .teardown.single = NULL,
2089cff7d378SThomas Gleixner },
2090cff7d378SThomas Gleixner #ifdef CONFIG_SMP
2091cff7d378SThomas Gleixner [CPUHP_CREATE_THREADS] = {
2092677f6646SThomas Gleixner .name = "threads:prepare",
20933c1627e9SThomas Gleixner .startup.single = smpboot_create_threads,
20943c1627e9SThomas Gleixner .teardown.single = NULL,
2095757c989bSThomas Gleixner .cant_stop = true,
2096cff7d378SThomas Gleixner },
209700e16c3dSThomas Gleixner [CPUHP_PERF_PREPARE] = {
20983c1627e9SThomas Gleixner .name = "perf:prepare",
20993c1627e9SThomas Gleixner .startup.single = perf_event_init_cpu,
21003c1627e9SThomas Gleixner .teardown.single = perf_event_exit_cpu,
210100e16c3dSThomas Gleixner },
21023191dd5aSJason A. Donenfeld [CPUHP_RANDOM_PREPARE] = {
21033191dd5aSJason A. Donenfeld .name = "random:prepare",
21043191dd5aSJason A. Donenfeld .startup.single = random_prepare_cpu,
21053191dd5aSJason A. Donenfeld .teardown.single = NULL,
21063191dd5aSJason A. Donenfeld },
21077ee681b2SThomas Gleixner [CPUHP_WORKQUEUE_PREP] = {
21083c1627e9SThomas Gleixner .name = "workqueue:prepare",
21093c1627e9SThomas Gleixner .startup.single = workqueue_prepare_cpu,
21103c1627e9SThomas Gleixner .teardown.single = NULL,
21117ee681b2SThomas Gleixner },
211227590dc1SThomas Gleixner [CPUHP_HRTIMERS_PREPARE] = {
21133c1627e9SThomas Gleixner .name = "hrtimers:prepare",
21143c1627e9SThomas Gleixner .startup.single = hrtimers_prepare_cpu,
211553f408caSThomas Gleixner .teardown.single = NULL,
211627590dc1SThomas Gleixner },
211731487f83SRichard Weinberger [CPUHP_SMPCFD_PREPARE] = {
2118677f6646SThomas Gleixner .name = "smpcfd:prepare",
21193c1627e9SThomas Gleixner .startup.single = smpcfd_prepare_cpu,
21203c1627e9SThomas Gleixner .teardown.single = smpcfd_dead_cpu,
212131487f83SRichard Weinberger },
2122e6d4989aSRichard Weinberger [CPUHP_RELAY_PREPARE] = {
2123e6d4989aSRichard Weinberger .name = "relay:prepare",
2124e6d4989aSRichard Weinberger .startup.single = relay_prepare_cpu,
2125e6d4989aSRichard Weinberger .teardown.single = NULL,
2126e6d4989aSRichard Weinberger },
21276731d4f1SSebastian Andrzej Siewior [CPUHP_SLAB_PREPARE] = {
21286731d4f1SSebastian Andrzej Siewior .name = "slab:prepare",
21296731d4f1SSebastian Andrzej Siewior .startup.single = slab_prepare_cpu,
21306731d4f1SSebastian Andrzej Siewior .teardown.single = slab_dead_cpu,
2131cff7d378SThomas Gleixner },
21324df83742SThomas Gleixner [CPUHP_RCUTREE_PREP] = {
2133677f6646SThomas Gleixner .name = "RCU/tree:prepare",
21343c1627e9SThomas Gleixner .startup.single = rcutree_prepare_cpu,
21353c1627e9SThomas Gleixner .teardown.single = rcutree_dead_cpu,
21364df83742SThomas Gleixner },
2137cff7d378SThomas Gleixner /*
21384fae16dfSRichard Cochran * On the tear-down path, timers_dead_cpu() must be invoked
21394fae16dfSRichard Cochran * before blk_mq_queue_reinit_notify() from notify_dead(),
21404fae16dfSRichard Cochran  * otherwise an RCU stall occurs.
21414fae16dfSRichard Cochran */
214226456f87SThomas Gleixner [CPUHP_TIMERS_PREPARE] = {
2143d018031fSMukesh Ojha .name = "timers:prepare",
214426456f87SThomas Gleixner .startup.single = timers_prepare_cpu,
21453c1627e9SThomas Gleixner .teardown.single = timers_dead_cpu,
21464fae16dfSRichard Cochran },
2147a631be92SThomas Gleixner
2148a631be92SThomas Gleixner #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
2149a631be92SThomas Gleixner /*
2150a631be92SThomas Gleixner  * Kicks the AP alive. The AP will wait in cpuhp_ap_sync_alive() until
2151a631be92SThomas Gleixner  * the next step releases it.
2152a631be92SThomas Gleixner */
2153a631be92SThomas Gleixner [CPUHP_BP_KICK_AP] = {
2154a631be92SThomas Gleixner .name = "cpu:kick_ap",
2155a631be92SThomas Gleixner .startup.single = cpuhp_kick_ap_alive,
2156a631be92SThomas Gleixner },
2157a631be92SThomas Gleixner
2158a631be92SThomas Gleixner /*
2159a631be92SThomas Gleixner * Waits for the AP to reach cpuhp_ap_sync_alive() and then
2160a631be92SThomas Gleixner * releases it for the complete bringup.
2161a631be92SThomas Gleixner */
2162a631be92SThomas Gleixner [CPUHP_BRINGUP_CPU] = {
2163a631be92SThomas Gleixner .name = "cpu:bringup",
2164a631be92SThomas Gleixner .startup.single = cpuhp_bringup_ap,
2165a631be92SThomas Gleixner .teardown.single = finish_cpu,
2166a631be92SThomas Gleixner .cant_stop = true,
2167a631be92SThomas Gleixner },
2168a631be92SThomas Gleixner #else
2169a631be92SThomas Gleixner /*
2170a631be92SThomas Gleixner * All-in-one CPU bringup state which includes the kick alive.
2171a631be92SThomas Gleixner */
2172cff7d378SThomas Gleixner [CPUHP_BRINGUP_CPU] = {
2173cff7d378SThomas Gleixner .name = "cpu:bringup",
21743c1627e9SThomas Gleixner .startup.single = bringup_cpu,
2175bf2c59fcSPeter Zijlstra .teardown.single = finish_cpu,
2176757c989bSThomas Gleixner .cant_stop = true,
21774baa0afcSThomas Gleixner },
2178a631be92SThomas Gleixner #endif
2179d10ef6f9SThomas Gleixner /* Final state before CPU kills itself */
2180d10ef6f9SThomas Gleixner [CPUHP_AP_IDLE_DEAD] = {
2181d10ef6f9SThomas Gleixner .name = "idle:dead",
2182d10ef6f9SThomas Gleixner },
2183d10ef6f9SThomas Gleixner /*
2184d10ef6f9SThomas Gleixner * Last state before CPU enters the idle loop to die. Transient state
2185d10ef6f9SThomas Gleixner * for synchronization.
2186d10ef6f9SThomas Gleixner */
2187d10ef6f9SThomas Gleixner [CPUHP_AP_OFFLINE] = {
2188d10ef6f9SThomas Gleixner .name = "ap:offline",
2189d10ef6f9SThomas Gleixner .cant_stop = true,
2190d10ef6f9SThomas Gleixner },
21919cf7243dSThomas Gleixner /* First state is scheduler control. Interrupts are disabled */
21929cf7243dSThomas Gleixner [CPUHP_AP_SCHED_STARTING] = {
21939cf7243dSThomas Gleixner .name = "sched:starting",
21943c1627e9SThomas Gleixner .startup.single = sched_cpu_starting,
21953c1627e9SThomas Gleixner .teardown.single = sched_cpu_dying,
21969cf7243dSThomas Gleixner },
21974df83742SThomas Gleixner [CPUHP_AP_RCUTREE_DYING] = {
2198677f6646SThomas Gleixner .name = "RCU/tree:dying",
21993c1627e9SThomas Gleixner .startup.single = NULL,
22003c1627e9SThomas Gleixner .teardown.single = rcutree_dying_cpu,
22014baa0afcSThomas Gleixner },
220246febd37SLai Jiangshan [CPUHP_AP_SMPCFD_DYING] = {
220346febd37SLai Jiangshan .name = "smpcfd:dying",
220446febd37SLai Jiangshan .startup.single = NULL,
220546febd37SLai Jiangshan .teardown.single = smpcfd_dying_cpu,
220646febd37SLai Jiangshan },
220753f408caSThomas Gleixner [CPUHP_AP_HRTIMERS_DYING] = {
220853f408caSThomas Gleixner .name = "hrtimers:dying",
2209*a5cbbea1SKoichiro Den .startup.single = hrtimers_cpu_starting,
221053f408caSThomas Gleixner .teardown.single = hrtimers_cpu_dying,
221153f408caSThomas Gleixner },
221253f408caSThomas Gleixner
2213d10ef6f9SThomas Gleixner /* Entry state on starting. Interrupts enabled from here on. Transient
2214d10ef6f9SThomas Gleixner  * state for synchronization */
2215d10ef6f9SThomas Gleixner [CPUHP_AP_ONLINE] = {
2216d10ef6f9SThomas Gleixner .name = "ap:online",
2217d10ef6f9SThomas Gleixner },
221817a2f1ceSLai Jiangshan /*
22191cf12e08SThomas Gleixner * Handled on control processor until the plugged processor manages
222017a2f1ceSLai Jiangshan * this itself.
222117a2f1ceSLai Jiangshan */
222217a2f1ceSLai Jiangshan [CPUHP_TEARDOWN_CPU] = {
222317a2f1ceSLai Jiangshan .name = "cpu:teardown",
222417a2f1ceSLai Jiangshan .startup.single = NULL,
222517a2f1ceSLai Jiangshan .teardown.single = takedown_cpu,
222617a2f1ceSLai Jiangshan .cant_stop = true,
222717a2f1ceSLai Jiangshan },
22281cf12e08SThomas Gleixner
22291cf12e08SThomas Gleixner [CPUHP_AP_SCHED_WAIT_EMPTY] = {
22301cf12e08SThomas Gleixner .name = "sched:waitempty",
22311cf12e08SThomas Gleixner .startup.single = NULL,
22321cf12e08SThomas Gleixner .teardown.single = sched_cpu_wait_empty,
22331cf12e08SThomas Gleixner },
22341cf12e08SThomas Gleixner
2235d10ef6f9SThomas Gleixner /* Handle smpboot threads park/unpark */
22361cf4f629SThomas Gleixner [CPUHP_AP_SMPBOOT_THREADS] = {
2237677f6646SThomas Gleixner .name = "smpboot/threads:online",
22383c1627e9SThomas Gleixner .startup.single = smpboot_unpark_threads,
2239c4de6569SThomas Gleixner .teardown.single = smpboot_park_threads,
22401cf4f629SThomas Gleixner },
2241c5cb83bbSThomas Gleixner [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2242c5cb83bbSThomas Gleixner .name = "irq/affinity:online",
2243c5cb83bbSThomas Gleixner .startup.single = irq_affinity_online_cpu,
2244c5cb83bbSThomas Gleixner .teardown.single = NULL,
2245c5cb83bbSThomas Gleixner },
224600e16c3dSThomas Gleixner [CPUHP_AP_PERF_ONLINE] = {
22473c1627e9SThomas Gleixner .name = "perf:online",
22483c1627e9SThomas Gleixner .startup.single = perf_event_init_cpu,
22493c1627e9SThomas Gleixner .teardown.single = perf_event_exit_cpu,
225000e16c3dSThomas Gleixner },
22519cf57731SPeter Zijlstra [CPUHP_AP_WATCHDOG_ONLINE] = {
22529cf57731SPeter Zijlstra .name = "lockup_detector:online",
22539cf57731SPeter Zijlstra .startup.single = lockup_detector_online_cpu,
22549cf57731SPeter Zijlstra .teardown.single = lockup_detector_offline_cpu,
22559cf57731SPeter Zijlstra },
22567ee681b2SThomas Gleixner [CPUHP_AP_WORKQUEUE_ONLINE] = {
22573c1627e9SThomas Gleixner .name = "workqueue:online",
22583c1627e9SThomas Gleixner .startup.single = workqueue_online_cpu,
22593c1627e9SThomas Gleixner .teardown.single = workqueue_offline_cpu,
22607ee681b2SThomas Gleixner },
22613191dd5aSJason A. Donenfeld [CPUHP_AP_RANDOM_ONLINE] = {
22623191dd5aSJason A. Donenfeld .name = "random:online",
22633191dd5aSJason A. Donenfeld .startup.single = random_online_cpu,
22643191dd5aSJason A. Donenfeld .teardown.single = NULL,
22653191dd5aSJason A. Donenfeld },
22664df83742SThomas Gleixner [CPUHP_AP_RCUTREE_ONLINE] = {
2267677f6646SThomas Gleixner .name = "RCU/tree:online",
22683c1627e9SThomas Gleixner .startup.single = rcutree_online_cpu,
22693c1627e9SThomas Gleixner .teardown.single = rcutree_offline_cpu,
22704df83742SThomas Gleixner },
22714baa0afcSThomas Gleixner #endif
2272d10ef6f9SThomas Gleixner /*
2273d10ef6f9SThomas Gleixner * The dynamically registered state space is here
2274d10ef6f9SThomas Gleixner */
2275d10ef6f9SThomas Gleixner
2276aaddd7d1SThomas Gleixner #ifdef CONFIG_SMP
2277aaddd7d1SThomas Gleixner /* Last state is scheduler control setting the cpu active */
2278aaddd7d1SThomas Gleixner [CPUHP_AP_ACTIVE] = {
2279aaddd7d1SThomas Gleixner .name = "sched:active",
22803c1627e9SThomas Gleixner .startup.single = sched_cpu_activate,
22813c1627e9SThomas Gleixner .teardown.single = sched_cpu_deactivate,
2282aaddd7d1SThomas Gleixner },
2283aaddd7d1SThomas Gleixner #endif
2284aaddd7d1SThomas Gleixner
2285d10ef6f9SThomas Gleixner /* CPU is fully up and running. */
22864baa0afcSThomas Gleixner [CPUHP_ONLINE] = {
22874baa0afcSThomas Gleixner .name = "online",
22883c1627e9SThomas Gleixner .startup.single = NULL,
22893c1627e9SThomas Gleixner .teardown.single = NULL,
22904baa0afcSThomas Gleixner },
22914baa0afcSThomas Gleixner };
22924baa0afcSThomas Gleixner
22935b7aa87eSThomas Gleixner /* Sanity check for callbacks */
22945b7aa87eSThomas Gleixner static int cpuhp_cb_check(enum cpuhp_state state)
22955b7aa87eSThomas Gleixner {
22965b7aa87eSThomas Gleixner if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
22975b7aa87eSThomas Gleixner return -EINVAL;
22985b7aa87eSThomas Gleixner return 0;
22995b7aa87eSThomas Gleixner }
23005b7aa87eSThomas Gleixner
2301dc280d93SThomas Gleixner /*
2302dc280d93SThomas Gleixner  * Returns a free slot for dynamic state assignment. The states
2303dc280d93SThomas Gleixner  * are protected by cpuhp_state_mutex and an empty slot is identified
2304dc280d93SThomas Gleixner * by having no name assigned.
2305dc280d93SThomas Gleixner */
2306dc280d93SThomas Gleixner static int cpuhp_reserve_state(enum cpuhp_state state)
2307dc280d93SThomas Gleixner {
23084205e478SThomas Gleixner enum cpuhp_state i, end;
23094205e478SThomas Gleixner struct cpuhp_step *step;
2310dc280d93SThomas Gleixner
23114205e478SThomas Gleixner switch (state) {
23124205e478SThomas Gleixner case CPUHP_AP_ONLINE_DYN:
231317a2f1ceSLai Jiangshan step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
23144205e478SThomas Gleixner end = CPUHP_AP_ONLINE_DYN_END;
23154205e478SThomas Gleixner break;
23164205e478SThomas Gleixner case CPUHP_BP_PREPARE_DYN:
231717a2f1ceSLai Jiangshan step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
23184205e478SThomas Gleixner end = CPUHP_BP_PREPARE_DYN_END;
23194205e478SThomas Gleixner break;
23204205e478SThomas Gleixner default:
23214205e478SThomas Gleixner return -EINVAL;
23224205e478SThomas Gleixner }
23234205e478SThomas Gleixner
23244205e478SThomas Gleixner for (i = state; i <= end; i++, step++) {
23254205e478SThomas Gleixner if (!step->name)
2326dc280d93SThomas Gleixner return i;
2327dc280d93SThomas Gleixner }
2328dc280d93SThomas Gleixner WARN(1, "No more dynamic states available for CPU hotplug\n");
2329dc280d93SThomas Gleixner return -ENOSPC;
2330dc280d93SThomas Gleixner }
2331dc280d93SThomas Gleixner
2332dc280d93SThomas Gleixner static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
23335b7aa87eSThomas Gleixner int (*startup)(unsigned int cpu),
2334cf392d10SThomas Gleixner int (*teardown)(unsigned int cpu),
2335cf392d10SThomas Gleixner bool multi_instance)
23365b7aa87eSThomas Gleixner {
23375b7aa87eSThomas Gleixner /* (Un)Install the callbacks for further cpu hotplug operations */
23385b7aa87eSThomas Gleixner struct cpuhp_step *sp;
2339dc280d93SThomas Gleixner int ret = 0;
23405b7aa87eSThomas Gleixner
23410c96b273SEthan Barnes /*
23420c96b273SEthan Barnes * If name is NULL, then the state gets removed.
23430c96b273SEthan Barnes *
23440c96b273SEthan Barnes * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
23450c96b273SEthan Barnes * the first allocation from these dynamic ranges, so the removal
23460c96b273SEthan Barnes * would trigger a new allocation and clear the wrong (already
23470c96b273SEthan Barnes * empty) state, leaving the callbacks of the to be cleared state
23480c96b273SEthan Barnes * dangling, which causes wreckage on the next hotplug operation.
23490c96b273SEthan Barnes */
23500c96b273SEthan Barnes if (name && (state == CPUHP_AP_ONLINE_DYN ||
23510c96b273SEthan Barnes state == CPUHP_BP_PREPARE_DYN)) {
2352dc280d93SThomas Gleixner ret = cpuhp_reserve_state(state);
2353dc280d93SThomas Gleixner if (ret < 0)
2354dc434e05SSebastian Andrzej Siewior return ret;
2355dc280d93SThomas Gleixner state = ret;
2356dc280d93SThomas Gleixner }
23575b7aa87eSThomas Gleixner sp = cpuhp_get_step(state);
2358dc434e05SSebastian Andrzej Siewior if (name && sp->name)
2359dc434e05SSebastian Andrzej Siewior return -EBUSY;
2360dc434e05SSebastian Andrzej Siewior
23613c1627e9SThomas Gleixner sp->startup.single = startup;
23623c1627e9SThomas Gleixner sp->teardown.single = teardown;
23635b7aa87eSThomas Gleixner sp->name = name;
2364cf392d10SThomas Gleixner sp->multi_instance = multi_instance;
2365cf392d10SThomas Gleixner INIT_HLIST_HEAD(&sp->list);
2366dc280d93SThomas Gleixner return ret;
23675b7aa87eSThomas Gleixner }
23685b7aa87eSThomas Gleixner
23695b7aa87eSThomas Gleixner static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
23705b7aa87eSThomas Gleixner {
23713c1627e9SThomas Gleixner return cpuhp_get_step(state)->teardown.single;
23725b7aa87eSThomas Gleixner }
23735b7aa87eSThomas Gleixner
23745b7aa87eSThomas Gleixner /*
23755b7aa87eSThomas Gleixner * Call the startup/teardown function for a step either on the AP or
23765b7aa87eSThomas Gleixner * on the current CPU.
23775b7aa87eSThomas Gleixner */
2378cf392d10SThomas Gleixner static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2379cf392d10SThomas Gleixner struct hlist_node *node)
23805b7aa87eSThomas Gleixner {
2381a724632cSThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(state);
23825b7aa87eSThomas Gleixner int ret;
23835b7aa87eSThomas Gleixner
23844dddfb5fSPeter Zijlstra /*
23854dddfb5fSPeter Zijlstra  * If there's nothing to do, we're done.
23864dddfb5fSPeter Zijlstra * Relies on the union for multi_instance.
23874dddfb5fSPeter Zijlstra */
2388453e4108SVincent Donnefort if (cpuhp_step_empty(bringup, sp))
23895b7aa87eSThomas Gleixner return 0;
23905b7aa87eSThomas Gleixner /*
23915b7aa87eSThomas Gleixner  * The non-AP-bound callbacks can fail on bringup. On teardown,
23925b7aa87eSThomas Gleixner  * e.g. module removal, we crash for now.
23935b7aa87eSThomas Gleixner */
23941cf4f629SThomas Gleixner #ifdef CONFIG_SMP
23951cf4f629SThomas Gleixner if (cpuhp_is_ap_state(state))
2396cf392d10SThomas Gleixner ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
23971cf4f629SThomas Gleixner else
239896abb968SPeter Zijlstra ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
23991cf4f629SThomas Gleixner #else
240096abb968SPeter Zijlstra ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
24011cf4f629SThomas Gleixner #endif
24025b7aa87eSThomas Gleixner BUG_ON(ret && !bringup);
24035b7aa87eSThomas Gleixner return ret;
24045b7aa87eSThomas Gleixner }
24055b7aa87eSThomas Gleixner
24065b7aa87eSThomas Gleixner /*
24075b7aa87eSThomas Gleixner * Called from __cpuhp_setup_state on a recoverable failure.
24085b7aa87eSThomas Gleixner *
24095b7aa87eSThomas Gleixner * Note: The teardown callbacks for rollback are not allowed to fail!
24105b7aa87eSThomas Gleixner */
24115b7aa87eSThomas Gleixner static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2412cf392d10SThomas Gleixner struct hlist_node *node)
24135b7aa87eSThomas Gleixner {
24145b7aa87eSThomas Gleixner int cpu;
24155b7aa87eSThomas Gleixner
24165b7aa87eSThomas Gleixner /* Roll back the already executed steps on the other cpus */
24175b7aa87eSThomas Gleixner for_each_present_cpu(cpu) {
24185b7aa87eSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
24195b7aa87eSThomas Gleixner int cpustate = st->state;
24205b7aa87eSThomas Gleixner
24215b7aa87eSThomas Gleixner if (cpu >= failedcpu)
24225b7aa87eSThomas Gleixner break;
24235b7aa87eSThomas Gleixner
24245b7aa87eSThomas Gleixner /* Did we invoke the startup call on that cpu ? */
24255b7aa87eSThomas Gleixner if (cpustate >= state)
2426cf392d10SThomas Gleixner cpuhp_issue_call(cpu, state, false, node);
24275b7aa87eSThomas Gleixner }
24285b7aa87eSThomas Gleixner }
24295b7aa87eSThomas Gleixner
24309805c673SThomas Gleixner int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
24319805c673SThomas Gleixner struct hlist_node *node,
2432cf392d10SThomas Gleixner bool invoke)
2433cf392d10SThomas Gleixner {
2434cf392d10SThomas Gleixner struct cpuhp_step *sp;
2435cf392d10SThomas Gleixner int cpu;
2436cf392d10SThomas Gleixner int ret;
2437cf392d10SThomas Gleixner
24389805c673SThomas Gleixner lockdep_assert_cpus_held();
24399805c673SThomas Gleixner
2440cf392d10SThomas Gleixner sp = cpuhp_get_step(state);
2441cf392d10SThomas Gleixner if (sp->multi_instance == false)
2442cf392d10SThomas Gleixner return -EINVAL;
2443cf392d10SThomas Gleixner
2444dc434e05SSebastian Andrzej Siewior mutex_lock(&cpuhp_state_mutex);
2445cf392d10SThomas Gleixner
24463c1627e9SThomas Gleixner if (!invoke || !sp->startup.multi)
2447cf392d10SThomas Gleixner goto add_node;
2448cf392d10SThomas Gleixner
2449cf392d10SThomas Gleixner /*
2450cf392d10SThomas Gleixner * Try to call the startup callback for each present cpu
2451cf392d10SThomas Gleixner * depending on the hotplug state of the cpu.
2452cf392d10SThomas Gleixner */
2453cf392d10SThomas Gleixner for_each_present_cpu(cpu) {
2454cf392d10SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2455cf392d10SThomas Gleixner int cpustate = st->state;
2456cf392d10SThomas Gleixner
2457cf392d10SThomas Gleixner if (cpustate < state)
2458cf392d10SThomas Gleixner continue;
2459cf392d10SThomas Gleixner
2460cf392d10SThomas Gleixner ret = cpuhp_issue_call(cpu, state, true, node);
2461cf392d10SThomas Gleixner if (ret) {
24623c1627e9SThomas Gleixner if (sp->teardown.multi)
2463cf392d10SThomas Gleixner cpuhp_rollback_install(cpu, state, node);
2464dc434e05SSebastian Andrzej Siewior goto unlock;
2465cf392d10SThomas Gleixner }
2466cf392d10SThomas Gleixner }
2467cf392d10SThomas Gleixner add_node:
2468cf392d10SThomas Gleixner ret = 0;
2469cf392d10SThomas Gleixner hlist_add_head(node, &sp->list);
2470dc434e05SSebastian Andrzej Siewior unlock:
2471cf392d10SThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
24729805c673SThomas Gleixner return ret;
24739805c673SThomas Gleixner }
24749805c673SThomas Gleixner
24759805c673SThomas Gleixner int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
24769805c673SThomas Gleixner bool invoke)
24779805c673SThomas Gleixner {
24789805c673SThomas Gleixner int ret;
24799805c673SThomas Gleixner
24809805c673SThomas Gleixner cpus_read_lock();
24819805c673SThomas Gleixner ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
24828f553c49SThomas Gleixner cpus_read_unlock();
2483cf392d10SThomas Gleixner return ret;
2484cf392d10SThomas Gleixner }
2485cf392d10SThomas Gleixner EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
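
/*
 * Illustrative multi-instance sketch added by the editor; not part of the
 * original file. A state is registered once with per-instance callbacks via
 * cpuhp_setup_state_multi() (declared in <linux/cpuhotplug.h>), then every
 * object hooks itself in through its hlist_node. All example_* names and the
 * state string are hypothetical.
 */
struct example_inst {
	struct hlist_node node;		/* linked into the state's instance list */
};

static enum cpuhp_state example_multi_state;

static int example_inst_online(unsigned int cpu, struct hlist_node *node)
{
	struct example_inst *inst = hlist_entry(node, struct example_inst, node);

	/* Per-instance, per-CPU bringup work would go here. */
	(void)inst;
	return 0;
}

static int __init example_multi_init(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "subsys/example:multi",
					  example_inst_online, NULL);
	if (ret < 0)
		return ret;
	example_multi_state = ret;
	return 0;
}

/* Called for each new object; invokes the callback on already-online CPUs. */
static int example_add_instance(struct example_inst *inst)
{
	return cpuhp_state_add_instance(example_multi_state, &inst->node);
}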
2486cf392d10SThomas Gleixner
24875b7aa87eSThomas Gleixner /**
248871def423SSebastian Andrzej Siewior  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
24895b7aa87eSThomas Gleixner * @state: The state to setup
2490ed3cd1daSBaokun Li * @name: Name of the step
24915b7aa87eSThomas Gleixner * @invoke: If true, the startup function is invoked for cpus where
24925b7aa87eSThomas Gleixner * cpu state >= @state
24935b7aa87eSThomas Gleixner * @startup: startup callback function
24945b7aa87eSThomas Gleixner * @teardown: teardown callback function
2495dc280d93SThomas Gleixner * @multi_instance: State is set up for multiple instances which get
2496dc280d93SThomas Gleixner * added afterwards.
24975b7aa87eSThomas Gleixner *
249871def423SSebastian Andrzej Siewior * The caller needs to hold cpus read locked while calling this function.
249911bc021dSRandy Dunlap * Return:
2500512f0980SBoris Ostrovsky * On success:
250152bbae42SYuntao Wang * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
2502512f0980SBoris Ostrovsky * 0 for all other states
2503512f0980SBoris Ostrovsky * On failure: proper (negative) error code
25045b7aa87eSThomas Gleixner */
250571def423SSebastian Andrzej Siewior int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
25065b7aa87eSThomas Gleixner const char *name, bool invoke,
25075b7aa87eSThomas Gleixner int (*startup)(unsigned int cpu),
2508cf392d10SThomas Gleixner int (*teardown)(unsigned int cpu),
2509cf392d10SThomas Gleixner bool multi_instance)
25105b7aa87eSThomas Gleixner {
25115b7aa87eSThomas Gleixner int cpu, ret = 0;
2512b9d9d691SThomas Gleixner bool dynstate;
25135b7aa87eSThomas Gleixner
251471def423SSebastian Andrzej Siewior lockdep_assert_cpus_held();
251571def423SSebastian Andrzej Siewior
25165b7aa87eSThomas Gleixner if (cpuhp_cb_check(state) || !name)
25175b7aa87eSThomas Gleixner return -EINVAL;
25185b7aa87eSThomas Gleixner
2519dc434e05SSebastian Andrzej Siewior mutex_lock(&cpuhp_state_mutex);
25205b7aa87eSThomas Gleixner
2521dc280d93SThomas Gleixner ret = cpuhp_store_callbacks(state, name, startup, teardown,
2522dc280d93SThomas Gleixner multi_instance);
25235b7aa87eSThomas Gleixner
252452bbae42SYuntao Wang dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
2525b9d9d691SThomas Gleixner if (ret > 0 && dynstate) {
2526b9d9d691SThomas Gleixner state = ret;
2527b9d9d691SThomas Gleixner ret = 0;
2528b9d9d691SThomas Gleixner }
2529b9d9d691SThomas Gleixner
2530dc280d93SThomas Gleixner if (ret || !invoke || !startup)
25315b7aa87eSThomas Gleixner goto out;
25325b7aa87eSThomas Gleixner
25335b7aa87eSThomas Gleixner /*
25345b7aa87eSThomas Gleixner * Try to call the startup callback for each present cpu
25355b7aa87eSThomas Gleixner * depending on the hotplug state of the cpu.
25365b7aa87eSThomas Gleixner */
25375b7aa87eSThomas Gleixner for_each_present_cpu(cpu) {
25385b7aa87eSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
25395b7aa87eSThomas Gleixner int cpustate = st->state;
25405b7aa87eSThomas Gleixner
25415b7aa87eSThomas Gleixner if (cpustate < state)
25425b7aa87eSThomas Gleixner continue;
25435b7aa87eSThomas Gleixner
2544cf392d10SThomas Gleixner ret = cpuhp_issue_call(cpu, state, true, NULL);
25455b7aa87eSThomas Gleixner if (ret) {
2546a724632cSThomas Gleixner if (teardown)
2547cf392d10SThomas Gleixner cpuhp_rollback_install(cpu, state, NULL);
2548cf392d10SThomas Gleixner cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
25495b7aa87eSThomas Gleixner goto out;
25505b7aa87eSThomas Gleixner }
25515b7aa87eSThomas Gleixner }
25525b7aa87eSThomas Gleixner out:
2553dc434e05SSebastian Andrzej Siewior mutex_unlock(&cpuhp_state_mutex);
2554dc280d93SThomas Gleixner /*
255552bbae42SYuntao Wang * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
255652bbae42SYuntao Wang * return the dynamically allocated state in case of success.
2557dc280d93SThomas Gleixner */
2558b9d9d691SThomas Gleixner if (!ret && dynstate)
25595b7aa87eSThomas Gleixner return state;
25605b7aa87eSThomas Gleixner return ret;
25615b7aa87eSThomas Gleixner }
256271def423SSebastian Andrzej Siewior EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
256371def423SSebastian Andrzej Siewior
256471def423SSebastian Andrzej Siewior int __cpuhp_setup_state(enum cpuhp_state state,
256571def423SSebastian Andrzej Siewior const char *name, bool invoke,
256671def423SSebastian Andrzej Siewior int (*startup)(unsigned int cpu),
256771def423SSebastian Andrzej Siewior int (*teardown)(unsigned int cpu),
256871def423SSebastian Andrzej Siewior bool multi_instance)
256971def423SSebastian Andrzej Siewior {
257071def423SSebastian Andrzej Siewior int ret;
257171def423SSebastian Andrzej Siewior
257271def423SSebastian Andrzej Siewior cpus_read_lock();
257371def423SSebastian Andrzej Siewior ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
257471def423SSebastian Andrzej Siewior teardown, multi_instance);
257571def423SSebastian Andrzej Siewior cpus_read_unlock();
257671def423SSebastian Andrzej Siewior return ret;
257771def423SSebastian Andrzej Siewior }
25785b7aa87eSThomas Gleixner EXPORT_SYMBOL(__cpuhp_setup_state);
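
/*
 * Illustrative usage sketch added by the editor; not part of the original
 * file. It shows the typical consumer-side pattern for the dynamic state
 * range through the cpuhp_setup_state() wrapper declared in
 * <linux/cpuhotplug.h>. All example_* names and the state string are
 * hypothetical.
 */
static enum cpuhp_state example_hp_online;

static int example_cpu_online(unsigned int cpu)
{
	/* Set up per-CPU resources here; return 0 on success. */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* Undo example_cpu_online(); teardown must not fail. */
	return 0;
}

static int __init example_hp_init(void)
{
	int ret;

	/*
	 * For CPUHP_AP_ONLINE_DYN the dynamically allocated state number is
	 * returned on success and must be kept for later removal.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/example:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;
	example_hp_online = ret;
	return 0;
}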
25795b7aa87eSThomas Gleixner
2580cf392d10SThomas Gleixner int __cpuhp_state_remove_instance(enum cpuhp_state state,
2581cf392d10SThomas Gleixner struct hlist_node *node, bool invoke)
2582cf392d10SThomas Gleixner {
2583cf392d10SThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(state);
2584cf392d10SThomas Gleixner int cpu;
2585cf392d10SThomas Gleixner
2586cf392d10SThomas Gleixner BUG_ON(cpuhp_cb_check(state));
2587cf392d10SThomas Gleixner
2588cf392d10SThomas Gleixner if (!sp->multi_instance)
2589cf392d10SThomas Gleixner return -EINVAL;
2590cf392d10SThomas Gleixner
25918f553c49SThomas Gleixner cpus_read_lock();
2592dc434e05SSebastian Andrzej Siewior mutex_lock(&cpuhp_state_mutex);
2593dc434e05SSebastian Andrzej Siewior
2594cf392d10SThomas Gleixner if (!invoke || !cpuhp_get_teardown_cb(state))
2595cf392d10SThomas Gleixner goto remove;
2596cf392d10SThomas Gleixner /*
2597cf392d10SThomas Gleixner * Call the teardown callback for each present cpu depending
2598cf392d10SThomas Gleixner * on the hotplug state of the cpu. This function is not
2599cf392d10SThomas Gleixner * allowed to fail currently!
2600cf392d10SThomas Gleixner */
2601cf392d10SThomas Gleixner for_each_present_cpu(cpu) {
2602cf392d10SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2603cf392d10SThomas Gleixner int cpustate = st->state;
2604cf392d10SThomas Gleixner
2605cf392d10SThomas Gleixner if (cpustate >= state)
2606cf392d10SThomas Gleixner cpuhp_issue_call(cpu, state, false, node);
2607cf392d10SThomas Gleixner }
2608cf392d10SThomas Gleixner
2609cf392d10SThomas Gleixner remove:
2610cf392d10SThomas Gleixner hlist_del(node);
2611cf392d10SThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
26128f553c49SThomas Gleixner cpus_read_unlock();
2613cf392d10SThomas Gleixner
2614cf392d10SThomas Gleixner return 0;
2615cf392d10SThomas Gleixner }
2616cf392d10SThomas Gleixner EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2617dc434e05SSebastian Andrzej Siewior
26185b7aa87eSThomas Gleixner /**
261971def423SSebastian Andrzej Siewior  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
26205b7aa87eSThomas Gleixner * @state: The state to remove
26215b7aa87eSThomas Gleixner * @invoke: If true, the teardown function is invoked for cpus where
26225b7aa87eSThomas Gleixner * cpu state >= @state
26235b7aa87eSThomas Gleixner *
262471def423SSebastian Andrzej Siewior * The caller needs to hold cpus read locked while calling this function.
26255b7aa87eSThomas Gleixner * The teardown callback is currently not allowed to fail. Think
26265b7aa87eSThomas Gleixner * about module removal!
26275b7aa87eSThomas Gleixner */
262871def423SSebastian Andrzej Siewior void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
26295b7aa87eSThomas Gleixner {
2630cf392d10SThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(state);
26315b7aa87eSThomas Gleixner int cpu;
26325b7aa87eSThomas Gleixner
26335b7aa87eSThomas Gleixner BUG_ON(cpuhp_cb_check(state));
26345b7aa87eSThomas Gleixner
263571def423SSebastian Andrzej Siewior lockdep_assert_cpus_held();
26365b7aa87eSThomas Gleixner
2637dc434e05SSebastian Andrzej Siewior mutex_lock(&cpuhp_state_mutex);
2638cf392d10SThomas Gleixner if (sp->multi_instance) {
2639cf392d10SThomas Gleixner WARN(!hlist_empty(&sp->list),
2640cf392d10SThomas Gleixner "Error: Removing state %d which has instances left.\n",
2641cf392d10SThomas Gleixner state);
2642cf392d10SThomas Gleixner goto remove;
2643cf392d10SThomas Gleixner }
2644cf392d10SThomas Gleixner
2645a724632cSThomas Gleixner if (!invoke || !cpuhp_get_teardown_cb(state))
26465b7aa87eSThomas Gleixner goto remove;
26475b7aa87eSThomas Gleixner
26485b7aa87eSThomas Gleixner /*
26495b7aa87eSThomas Gleixner * Call the teardown callback for each present cpu depending
26505b7aa87eSThomas Gleixner * on the hotplug state of the cpu. This function is not
26515b7aa87eSThomas Gleixner * allowed to fail currently!
26525b7aa87eSThomas Gleixner */
26535b7aa87eSThomas Gleixner for_each_present_cpu(cpu) {
26545b7aa87eSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
26555b7aa87eSThomas Gleixner int cpustate = st->state;
26565b7aa87eSThomas Gleixner
26575b7aa87eSThomas Gleixner if (cpustate >= state)
2658cf392d10SThomas Gleixner cpuhp_issue_call(cpu, state, false, NULL);
26595b7aa87eSThomas Gleixner }
26605b7aa87eSThomas Gleixner remove:
2661cf392d10SThomas Gleixner cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2662dc434e05SSebastian Andrzej Siewior mutex_unlock(&cpuhp_state_mutex);
266371def423SSebastian Andrzej Siewior }
266471def423SSebastian Andrzej Siewior EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
266571def423SSebastian Andrzej Siewior
266671def423SSebastian Andrzej Siewior void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
266771def423SSebastian Andrzej Siewior {
266871def423SSebastian Andrzej Siewior cpus_read_lock();
266971def423SSebastian Andrzej Siewior __cpuhp_remove_state_cpuslocked(state, invoke);
26708f553c49SThomas Gleixner cpus_read_unlock();
26715b7aa87eSThomas Gleixner }
26725b7aa87eSThomas Gleixner EXPORT_SYMBOL(__cpuhp_remove_state);
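
/*
 * Illustrative sketch added by the editor; counterpart to the
 * cpuhp_setup_state() example further up. cpuhp_remove_state() (declared in
 * <linux/cpuhotplug.h>) removes the state with invoke == true, so the
 * teardown callback runs on all online CPUs before the slot is freed.
 * example_hp_online refers to the hypothetical variable from that sketch.
 */
static void __exit example_hp_exit(void)
{
	cpuhp_remove_state(example_hp_online);
}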
26735b7aa87eSThomas Gleixner
2674dc8d37edSArnd Bergmann #ifdef CONFIG_HOTPLUG_SMT
2675dc8d37edSArnd Bergmann static void cpuhp_offline_cpu_device(unsigned int cpu)
2676dc8d37edSArnd Bergmann {
2677dc8d37edSArnd Bergmann struct device *dev = get_cpu_device(cpu);
2678dc8d37edSArnd Bergmann
2679dc8d37edSArnd Bergmann dev->offline = true;
2680dc8d37edSArnd Bergmann /* Tell user space about the state change */
2681dc8d37edSArnd Bergmann kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2682dc8d37edSArnd Bergmann }
2683dc8d37edSArnd Bergmann
2684dc8d37edSArnd Bergmann static void cpuhp_online_cpu_device(unsigned int cpu)
2685dc8d37edSArnd Bergmann {
2686dc8d37edSArnd Bergmann struct device *dev = get_cpu_device(cpu);
2687dc8d37edSArnd Bergmann
2688dc8d37edSArnd Bergmann dev->offline = false;
2689dc8d37edSArnd Bergmann /* Tell user space about the state change */
2690dc8d37edSArnd Bergmann kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2691dc8d37edSArnd Bergmann }
2692dc8d37edSArnd Bergmann
2693dc8d37edSArnd Bergmann int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2694dc8d37edSArnd Bergmann {
2695dc8d37edSArnd Bergmann int cpu, ret = 0;
2696dc8d37edSArnd Bergmann
2697dc8d37edSArnd Bergmann cpu_maps_update_begin();
2698dc8d37edSArnd Bergmann for_each_online_cpu(cpu) {
2699dc8d37edSArnd Bergmann if (topology_is_primary_thread(cpu))
2700dc8d37edSArnd Bergmann continue;
270138253464SMichael Ellerman /*
270238253464SMichael Ellerman * Disable can be called with CPU_SMT_ENABLED when changing
270338253464SMichael Ellerman * from a higher to lower number of SMT threads per core.
270438253464SMichael Ellerman */
270538253464SMichael Ellerman if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
270638253464SMichael Ellerman continue;
2707dc8d37edSArnd Bergmann ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2708dc8d37edSArnd Bergmann if (ret)
2709dc8d37edSArnd Bergmann break;
2710dc8d37edSArnd Bergmann /*
2711dc8d37edSArnd Bergmann * As this needs to hold the cpu maps lock it's impossible
2712dc8d37edSArnd Bergmann * to call device_offline() because that ends up calling
2713dc8d37edSArnd Bergmann * cpu_down() which takes cpu maps lock. cpu maps lock
2714dc8d37edSArnd Bergmann * needs to be held as this might race against in kernel
2715dc8d37edSArnd Bergmann * abusers of the hotplug machinery (thermal management).
2716dc8d37edSArnd Bergmann *
2717dc8d37edSArnd Bergmann * So nothing would update device:offline state. That would
2718dc8d37edSArnd Bergmann * leave the sysfs entry stale and prevent onlining after
2719dc8d37edSArnd Bergmann * smt control has been changed to 'off' again. This is
2720dc8d37edSArnd Bergmann * called under the sysfs hotplug lock, so it is properly
2721dc8d37edSArnd Bergmann * serialized against the regular offline usage.
2722dc8d37edSArnd Bergmann */
2723dc8d37edSArnd Bergmann cpuhp_offline_cpu_device(cpu);
2724dc8d37edSArnd Bergmann }
2725dc8d37edSArnd Bergmann if (!ret)
2726dc8d37edSArnd Bergmann cpu_smt_control = ctrlval;
2727dc8d37edSArnd Bergmann cpu_maps_update_done();
2728dc8d37edSArnd Bergmann return ret;
2729dc8d37edSArnd Bergmann }
2730dc8d37edSArnd Bergmann
2731f17c3a37SNysal Jan K.A /**
2732f17c3a37SNysal Jan K.A * Check if the core a CPU belongs to is online
2733f17c3a37SNysal Jan K.A */
2734f17c3a37SNysal Jan K.A #if !defined(topology_is_core_online)
2735f17c3a37SNysal Jan K.A static inline bool topology_is_core_online(unsigned int cpu)
2736f17c3a37SNysal Jan K.A {
2737f17c3a37SNysal Jan K.A return true;
2738f17c3a37SNysal Jan K.A }
2739f17c3a37SNysal Jan K.A #endif
2740f17c3a37SNysal Jan K.A
2741dc8d37edSArnd Bergmann int cpuhp_smt_enable(void)
2742dc8d37edSArnd Bergmann {
2743dc8d37edSArnd Bergmann int cpu, ret = 0;
2744dc8d37edSArnd Bergmann
2745dc8d37edSArnd Bergmann cpu_maps_update_begin();
2746dc8d37edSArnd Bergmann cpu_smt_control = CPU_SMT_ENABLED;
2747dc8d37edSArnd Bergmann for_each_present_cpu(cpu) {
2748dc8d37edSArnd Bergmann /* Skip online CPUs and CPUs on offline nodes */
2749dc8d37edSArnd Bergmann if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2750dc8d37edSArnd Bergmann continue;
2751f17c3a37SNysal Jan K.A if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
275238253464SMichael Ellerman continue;
2753dc8d37edSArnd Bergmann ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2754dc8d37edSArnd Bergmann if (ret)
2755dc8d37edSArnd Bergmann break;
2756dc8d37edSArnd Bergmann /* See comment in cpuhp_smt_disable() */
2757dc8d37edSArnd Bergmann cpuhp_online_cpu_device(cpu);
2758dc8d37edSArnd Bergmann }
2759dc8d37edSArnd Bergmann cpu_maps_update_done();
2760dc8d37edSArnd Bergmann return ret;
2761dc8d37edSArnd Bergmann }
2762dc8d37edSArnd Bergmann #endif
2763dc8d37edSArnd Bergmann
276498f8cdceSThomas Gleixner #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
27651782dc87SYueHaibing static ssize_t state_show(struct device *dev,
276698f8cdceSThomas Gleixner struct device_attribute *attr, char *buf)
276798f8cdceSThomas Gleixner {
276898f8cdceSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
276998f8cdceSThomas Gleixner
277098f8cdceSThomas Gleixner return sprintf(buf, "%d\n", st->state);
277198f8cdceSThomas Gleixner }
27721782dc87SYueHaibing static DEVICE_ATTR_RO(state);
277398f8cdceSThomas Gleixner
27741782dc87SYueHaibing static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2775757c989bSThomas Gleixner const char *buf, size_t count)
2776757c989bSThomas Gleixner {
2777757c989bSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2778757c989bSThomas Gleixner struct cpuhp_step *sp;
2779757c989bSThomas Gleixner int target, ret;
2780757c989bSThomas Gleixner
2781757c989bSThomas Gleixner ret = kstrtoint(buf, 10, &target);
2782757c989bSThomas Gleixner if (ret)
2783757c989bSThomas Gleixner return ret;
2784757c989bSThomas Gleixner
2785757c989bSThomas Gleixner #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2786757c989bSThomas Gleixner if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2787757c989bSThomas Gleixner return -EINVAL;
2788757c989bSThomas Gleixner #else
2789757c989bSThomas Gleixner if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2790757c989bSThomas Gleixner return -EINVAL;
2791757c989bSThomas Gleixner #endif
2792757c989bSThomas Gleixner
2793757c989bSThomas Gleixner ret = lock_device_hotplug_sysfs();
2794757c989bSThomas Gleixner if (ret)
2795757c989bSThomas Gleixner return ret;
2796757c989bSThomas Gleixner
2797757c989bSThomas Gleixner mutex_lock(&cpuhp_state_mutex);
2798757c989bSThomas Gleixner sp = cpuhp_get_step(target);
2799757c989bSThomas Gleixner ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2800757c989bSThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
2801757c989bSThomas Gleixner if (ret)
280240da1b11SSebastian Andrzej Siewior goto out;
2803757c989bSThomas Gleixner
2804757c989bSThomas Gleixner if (st->state < target)
280533c3736eSQais Yousef ret = cpu_up(dev->id, target);
280664ea6e44SPhil Auld else if (st->state > target)
280733c3736eSQais Yousef ret = cpu_down(dev->id, target);
280864ea6e44SPhil Auld else if (WARN_ON(st->target != target))
280964ea6e44SPhil Auld st->target = target;
281040da1b11SSebastian Andrzej Siewior out:
2811757c989bSThomas Gleixner unlock_device_hotplug();
2812757c989bSThomas Gleixner return ret ? ret : count;
2813757c989bSThomas Gleixner }
2814757c989bSThomas Gleixner
28151782dc87SYueHaibing static ssize_t target_show(struct device *dev,
281698f8cdceSThomas Gleixner struct device_attribute *attr, char *buf)
281798f8cdceSThomas Gleixner {
281898f8cdceSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
281998f8cdceSThomas Gleixner
282098f8cdceSThomas Gleixner return sprintf(buf, "%d\n", st->target);
282198f8cdceSThomas Gleixner }
28221782dc87SYueHaibing static DEVICE_ATTR_RW(target);
282398f8cdceSThomas Gleixner
28241782dc87SYueHaibing static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
28251db49484SPeter Zijlstra const char *buf, size_t count)
28261db49484SPeter Zijlstra {
28271db49484SPeter Zijlstra struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
28281db49484SPeter Zijlstra struct cpuhp_step *sp;
28291db49484SPeter Zijlstra int fail, ret;
28301db49484SPeter Zijlstra
28311db49484SPeter Zijlstra ret = kstrtoint(buf, 10, &fail);
28321db49484SPeter Zijlstra if (ret)
28331db49484SPeter Zijlstra return ret;
28341db49484SPeter Zijlstra
28353ae70c25SVincent Donnefort if (fail == CPUHP_INVALID) {
28363ae70c25SVincent Donnefort st->fail = fail;
28373ae70c25SVincent Donnefort return count;
28383ae70c25SVincent Donnefort }
28393ae70c25SVincent Donnefort
284033d4a5a7SEiichi Tsukata if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
284133d4a5a7SEiichi Tsukata return -EINVAL;
284233d4a5a7SEiichi Tsukata
28431db49484SPeter Zijlstra /*
28441db49484SPeter Zijlstra * Cannot fail STARTING/DYING callbacks.
28451db49484SPeter Zijlstra */
28461db49484SPeter Zijlstra if (cpuhp_is_atomic_state(fail))
28471db49484SPeter Zijlstra return -EINVAL;
28481db49484SPeter Zijlstra
28491db49484SPeter Zijlstra /*
285062f25069SVincent Donnefort * DEAD callbacks cannot fail...
285162f25069SVincent Donnefort  * ... neither can CPUHP_BRINGUP_CPU during hotunplug. Since the latter
285262f25069SVincent Donnefort  * triggers the STARTING callbacks, a failure in this state would
285362f25069SVincent Donnefort * hinder rollback.
285462f25069SVincent Donnefort */
285562f25069SVincent Donnefort if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
285662f25069SVincent Donnefort return -EINVAL;
285762f25069SVincent Donnefort
285862f25069SVincent Donnefort /*
28591db49484SPeter Zijlstra * Cannot fail anything that doesn't have callbacks.
28601db49484SPeter Zijlstra */
28611db49484SPeter Zijlstra mutex_lock(&cpuhp_state_mutex);
28621db49484SPeter Zijlstra sp = cpuhp_get_step(fail);
28631db49484SPeter Zijlstra if (!sp->startup.single && !sp->teardown.single)
28641db49484SPeter Zijlstra ret = -EINVAL;
28651db49484SPeter Zijlstra mutex_unlock(&cpuhp_state_mutex);
28661db49484SPeter Zijlstra if (ret)
28671db49484SPeter Zijlstra return ret;
28681db49484SPeter Zijlstra
28691db49484SPeter Zijlstra st->fail = fail;
28701db49484SPeter Zijlstra
28711db49484SPeter Zijlstra return count;
28721db49484SPeter Zijlstra }
28731db49484SPeter Zijlstra
28741782dc87SYueHaibing static ssize_t fail_show(struct device *dev,
28751db49484SPeter Zijlstra struct device_attribute *attr, char *buf)
28761db49484SPeter Zijlstra {
28771db49484SPeter Zijlstra struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
28781db49484SPeter Zijlstra
28791db49484SPeter Zijlstra return sprintf(buf, "%d\n", st->fail);
28801db49484SPeter Zijlstra }
28811db49484SPeter Zijlstra
28821782dc87SYueHaibing static DEVICE_ATTR_RW(fail);
28831db49484SPeter Zijlstra
288498f8cdceSThomas Gleixner static struct attribute *cpuhp_cpu_attrs[] = {
288598f8cdceSThomas Gleixner &dev_attr_state.attr,
288698f8cdceSThomas Gleixner &dev_attr_target.attr,
28871db49484SPeter Zijlstra &dev_attr_fail.attr,
288898f8cdceSThomas Gleixner NULL
288998f8cdceSThomas Gleixner };
289098f8cdceSThomas Gleixner
2891993647a2SArvind Yadav static const struct attribute_group cpuhp_cpu_attr_group = {
289298f8cdceSThomas Gleixner .attrs = cpuhp_cpu_attrs,
289398f8cdceSThomas Gleixner .name = "hotplug",
289498f8cdceSThomas Gleixner NULL
289598f8cdceSThomas Gleixner };
289698f8cdceSThomas Gleixner
28971782dc87SYueHaibing static ssize_t states_show(struct device *dev,
289898f8cdceSThomas Gleixner struct device_attribute *attr, char *buf)
289998f8cdceSThomas Gleixner {
290098f8cdceSThomas Gleixner ssize_t cur, res = 0;
290198f8cdceSThomas Gleixner int i;
290298f8cdceSThomas Gleixner
290398f8cdceSThomas Gleixner mutex_lock(&cpuhp_state_mutex);
2904757c989bSThomas Gleixner for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
290598f8cdceSThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(i);
290698f8cdceSThomas Gleixner
290798f8cdceSThomas Gleixner if (sp->name) {
290898f8cdceSThomas Gleixner cur = sprintf(buf, "%3d: %s\n", i, sp->name);
290998f8cdceSThomas Gleixner buf += cur;
291098f8cdceSThomas Gleixner res += cur;
291198f8cdceSThomas Gleixner }
291298f8cdceSThomas Gleixner }
291398f8cdceSThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
291498f8cdceSThomas Gleixner return res;
291598f8cdceSThomas Gleixner }
29161782dc87SYueHaibing static DEVICE_ATTR_RO(states);
291798f8cdceSThomas Gleixner
291898f8cdceSThomas Gleixner static struct attribute *cpuhp_cpu_root_attrs[] = {
291998f8cdceSThomas Gleixner &dev_attr_states.attr,
292098f8cdceSThomas Gleixner NULL
292198f8cdceSThomas Gleixner };
292298f8cdceSThomas Gleixner
2923993647a2SArvind Yadav static const struct attribute_group cpuhp_cpu_root_attr_group = {
292498f8cdceSThomas Gleixner .attrs = cpuhp_cpu_root_attrs,
292598f8cdceSThomas Gleixner .name = "hotplug",
292698f8cdceSThomas Gleixner NULL
292798f8cdceSThomas Gleixner };
292898f8cdceSThomas Gleixner
292905736e4aSThomas Gleixner #ifdef CONFIG_HOTPLUG_SMT
293005736e4aSThomas Gleixner
29317f48405cSMichael Ellerman static bool cpu_smt_num_threads_valid(unsigned int threads)
29327f48405cSMichael Ellerman {
29337f48405cSMichael Ellerman if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
29347f48405cSMichael Ellerman return threads >= 1 && threads <= cpu_smt_max_threads;
29357f48405cSMichael Ellerman return threads == 1 || threads == cpu_smt_max_threads;
29367f48405cSMichael Ellerman }
29377f48405cSMichael Ellerman
293805736e4aSThomas Gleixner static ssize_t
2939de7b77e5SJosh Poimboeuf __store_smt_control(struct device *dev, struct device_attribute *attr,
294005736e4aSThomas Gleixner const char *buf, size_t count)
294105736e4aSThomas Gleixner {
29427f48405cSMichael Ellerman int ctrlval, ret, num_threads, orig_threads;
29437f48405cSMichael Ellerman bool force_off;
294405736e4aSThomas Gleixner
2945c53361ceSMichael Ellerman if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2946c53361ceSMichael Ellerman return -EPERM;
2947c53361ceSMichael Ellerman
2948c53361ceSMichael Ellerman if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2949c53361ceSMichael Ellerman return -ENODEV;
2950c53361ceSMichael Ellerman
29517f48405cSMichael Ellerman if (sysfs_streq(buf, "on")) {
295205736e4aSThomas Gleixner ctrlval = CPU_SMT_ENABLED;
29537f48405cSMichael Ellerman num_threads = cpu_smt_max_threads;
29547f48405cSMichael Ellerman } else if (sysfs_streq(buf, "off")) {
295505736e4aSThomas Gleixner ctrlval = CPU_SMT_DISABLED;
29567f48405cSMichael Ellerman num_threads = 1;
29577f48405cSMichael Ellerman } else if (sysfs_streq(buf, "forceoff")) {
295805736e4aSThomas Gleixner ctrlval = CPU_SMT_FORCE_DISABLED;
29597f48405cSMichael Ellerman num_threads = 1;
29607f48405cSMichael Ellerman } else if (kstrtoint(buf, 10, &num_threads) == 0) {
29617f48405cSMichael Ellerman if (num_threads == 1)
29627f48405cSMichael Ellerman ctrlval = CPU_SMT_DISABLED;
29637f48405cSMichael Ellerman else if (cpu_smt_num_threads_valid(num_threads))
29647f48405cSMichael Ellerman ctrlval = CPU_SMT_ENABLED;
296505736e4aSThomas Gleixner else
296605736e4aSThomas Gleixner return -EINVAL;
29677f48405cSMichael Ellerman } else {
29687f48405cSMichael Ellerman return -EINVAL;
29697f48405cSMichael Ellerman }
297005736e4aSThomas Gleixner
297105736e4aSThomas Gleixner ret = lock_device_hotplug_sysfs();
297205736e4aSThomas Gleixner if (ret)
297305736e4aSThomas Gleixner return ret;
297405736e4aSThomas Gleixner
29757f48405cSMichael Ellerman orig_threads = cpu_smt_num_threads;
29767f48405cSMichael Ellerman cpu_smt_num_threads = num_threads;
29777f48405cSMichael Ellerman
29787f48405cSMichael Ellerman force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
29797f48405cSMichael Ellerman
29807f48405cSMichael Ellerman if (num_threads > orig_threads)
2981215af549SThomas Gleixner ret = cpuhp_smt_enable();
29827f48405cSMichael Ellerman else if (num_threads < orig_threads || force_off)
298305736e4aSThomas Gleixner ret = cpuhp_smt_disable(ctrlval);
298405736e4aSThomas Gleixner
298505736e4aSThomas Gleixner unlock_device_hotplug();
298605736e4aSThomas Gleixner return ret ? ret : count;
298705736e4aSThomas Gleixner }
2988de7b77e5SJosh Poimboeuf
2989de7b77e5SJosh Poimboeuf #else /* !CONFIG_HOTPLUG_SMT */
2990de7b77e5SJosh Poimboeuf static ssize_t
2991de7b77e5SJosh Poimboeuf __store_smt_control(struct device *dev, struct device_attribute *attr,
2992de7b77e5SJosh Poimboeuf const char *buf, size_t count)
2993de7b77e5SJosh Poimboeuf {
2994de7b77e5SJosh Poimboeuf return -ENODEV;
2995de7b77e5SJosh Poimboeuf }
2996de7b77e5SJosh Poimboeuf #endif /* CONFIG_HOTPLUG_SMT */
2997de7b77e5SJosh Poimboeuf
2998de7b77e5SJosh Poimboeuf static const char *smt_states[] = {
2999de7b77e5SJosh Poimboeuf [CPU_SMT_ENABLED] = "on",
3000de7b77e5SJosh Poimboeuf [CPU_SMT_DISABLED] = "off",
3001de7b77e5SJosh Poimboeuf [CPU_SMT_FORCE_DISABLED] = "forceoff",
3002de7b77e5SJosh Poimboeuf [CPU_SMT_NOT_SUPPORTED] = "notsupported",
3003de7b77e5SJosh Poimboeuf [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
3004de7b77e5SJosh Poimboeuf };
3005de7b77e5SJosh Poimboeuf
30061782dc87SYueHaibing static ssize_t control_show(struct device *dev,
30071782dc87SYueHaibing struct device_attribute *attr, char *buf)
3008de7b77e5SJosh Poimboeuf {
3009de7b77e5SJosh Poimboeuf const char *state = smt_states[cpu_smt_control];
3010de7b77e5SJosh Poimboeuf
30117f48405cSMichael Ellerman #ifdef CONFIG_HOTPLUG_SMT
30127f48405cSMichael Ellerman /*
30137f48405cSMichael Ellerman * If SMT is enabled but not all threads are enabled then show the
30147f48405cSMichael Ellerman * number of threads. If all threads are enabled show "on". Otherwise
30157f48405cSMichael Ellerman * show the state name.
30167f48405cSMichael Ellerman */
30177f48405cSMichael Ellerman if (cpu_smt_control == CPU_SMT_ENABLED &&
30187f48405cSMichael Ellerman cpu_smt_num_threads != cpu_smt_max_threads)
30197f48405cSMichael Ellerman return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
30207f48405cSMichael Ellerman #endif
30217f48405cSMichael Ellerman
3022de7b77e5SJosh Poimboeuf return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
3023de7b77e5SJosh Poimboeuf }
3024de7b77e5SJosh Poimboeuf
30251782dc87SYueHaibing static ssize_t control_store(struct device *dev, struct device_attribute *attr,
3026de7b77e5SJosh Poimboeuf const char *buf, size_t count)
3027de7b77e5SJosh Poimboeuf {
3028de7b77e5SJosh Poimboeuf return __store_smt_control(dev, attr, buf, count);
3029de7b77e5SJosh Poimboeuf }
30301782dc87SYueHaibing static DEVICE_ATTR_RW(control);
303105736e4aSThomas Gleixner
30321782dc87SYueHaibing static ssize_t active_show(struct device *dev,
30331782dc87SYueHaibing struct device_attribute *attr, char *buf)
303405736e4aSThomas Gleixner {
3035de7b77e5SJosh Poimboeuf return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
303605736e4aSThomas Gleixner }
30371782dc87SYueHaibing static DEVICE_ATTR_RO(active);
303805736e4aSThomas Gleixner
303905736e4aSThomas Gleixner static struct attribute *cpuhp_smt_attrs[] = {
304005736e4aSThomas Gleixner &dev_attr_control.attr,
304105736e4aSThomas Gleixner &dev_attr_active.attr,
304205736e4aSThomas Gleixner NULL
304305736e4aSThomas Gleixner };
304405736e4aSThomas Gleixner
304505736e4aSThomas Gleixner static const struct attribute_group cpuhp_smt_attr_group = {
304605736e4aSThomas Gleixner .attrs = cpuhp_smt_attrs,
304705736e4aSThomas Gleixner .name = "smt",
304805736e4aSThomas Gleixner NULL
304905736e4aSThomas Gleixner };
305005736e4aSThomas Gleixner
3051de7b77e5SJosh Poimboeuf static int __init cpu_smt_sysfs_init(void)
305205736e4aSThomas Gleixner {
3053db281d59SGreg Kroah-Hartman struct device *dev_root;
3054db281d59SGreg Kroah-Hartman int ret = -ENODEV;
3055db281d59SGreg Kroah-Hartman
3056db281d59SGreg Kroah-Hartman dev_root = bus_get_dev_root(&cpu_subsys);
3057db281d59SGreg Kroah-Hartman if (dev_root) {
3058db281d59SGreg Kroah-Hartman ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
3059db281d59SGreg Kroah-Hartman put_device(dev_root);
3060db281d59SGreg Kroah-Hartman }
3061db281d59SGreg Kroah-Hartman return ret;
306205736e4aSThomas Gleixner }
306305736e4aSThomas Gleixner
306498f8cdceSThomas Gleixner static int __init cpuhp_sysfs_init(void)
306598f8cdceSThomas Gleixner {
3066db281d59SGreg Kroah-Hartman struct device *dev_root;
306798f8cdceSThomas Gleixner int cpu, ret;
306898f8cdceSThomas Gleixner
3069de7b77e5SJosh Poimboeuf ret = cpu_smt_sysfs_init();
307005736e4aSThomas Gleixner if (ret)
307105736e4aSThomas Gleixner return ret;
307205736e4aSThomas Gleixner
3073db281d59SGreg Kroah-Hartman dev_root = bus_get_dev_root(&cpu_subsys);
3074db281d59SGreg Kroah-Hartman if (dev_root) {
3075db281d59SGreg Kroah-Hartman ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
3076db281d59SGreg Kroah-Hartman put_device(dev_root);
307798f8cdceSThomas Gleixner if (ret)
307898f8cdceSThomas Gleixner return ret;
3079db281d59SGreg Kroah-Hartman }
308098f8cdceSThomas Gleixner
308198f8cdceSThomas Gleixner for_each_possible_cpu(cpu) {
308298f8cdceSThomas Gleixner struct device *dev = get_cpu_device(cpu);
308398f8cdceSThomas Gleixner
308498f8cdceSThomas Gleixner if (!dev)
308598f8cdceSThomas Gleixner continue;
308698f8cdceSThomas Gleixner ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
308798f8cdceSThomas Gleixner if (ret)
308898f8cdceSThomas Gleixner return ret;
308998f8cdceSThomas Gleixner }
309098f8cdceSThomas Gleixner return 0;
309198f8cdceSThomas Gleixner }
309298f8cdceSThomas Gleixner device_initcall(cpuhp_sysfs_init);
3093de7b77e5SJosh Poimboeuf #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
309498f8cdceSThomas Gleixner
3095e56b3bc7SLinus Torvalds /*
3096e56b3bc7SLinus Torvalds * cpu_bit_bitmap[] is a special, "compressed" data structure that
3097e56b3bc7SLinus Torvalds  * represents the single-bit value 1<<nr for every nr below NR_CPUS.
3098e56b3bc7SLinus Torvalds *
3099e0b582ecSRusty Russell * It is used by cpumask_of() to get a constant address to a CPU
3100e56b3bc7SLinus Torvalds * mask value that has a single bit set only.
3101e56b3bc7SLinus Torvalds */
3102b8d317d1SMike Travis
3103e56b3bc7SLinus Torvalds /* cpu_bit_bitmap[0] is empty - so we can back into it */
31044d51985eSMichael Rodriguez #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
3105e56b3bc7SLinus Torvalds #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3106e56b3bc7SLinus Torvalds #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3107e56b3bc7SLinus Torvalds #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3108b8d317d1SMike Travis
3109e56b3bc7SLinus Torvalds const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3110b8d317d1SMike Travis
3111e56b3bc7SLinus Torvalds MASK_DECLARE_8(0), MASK_DECLARE_8(8),
3112e56b3bc7SLinus Torvalds MASK_DECLARE_8(16), MASK_DECLARE_8(24),
3113e56b3bc7SLinus Torvalds #if BITS_PER_LONG > 32
3114e56b3bc7SLinus Torvalds MASK_DECLARE_8(32), MASK_DECLARE_8(40),
3115e56b3bc7SLinus Torvalds MASK_DECLARE_8(48), MASK_DECLARE_8(56),
3116b8d317d1SMike Travis #endif
3117b8d317d1SMike Travis };
3118e56b3bc7SLinus Torvalds EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
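
/*
 * Editor's note: an illustrative sketch of how cpumask_of() backs into the
 * table above. This mirrors get_cpu_mask() from <linux/cpumask.h> and is
 * shown here only to explain the "cpu_bit_bitmap[0] is empty" trick; it is
 * not part of the original file.
 */
static inline const struct cpumask *example_cpu_mask(unsigned int cpu)
{
	/* Row 1 + (cpu % BITS_PER_LONG) has exactly that bit set in its word 0. */
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	/* Step back so the set bit lands in word cpu / BITS_PER_LONG of the mask. */
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}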
31192d3854a3SRusty Russell
31202d3854a3SRusty Russell const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
31212d3854a3SRusty Russell EXPORT_SYMBOL(cpu_all_bits);
3122b3199c02SRusty Russell
3123b3199c02SRusty Russell #ifdef CONFIG_INIT_ALL_POSSIBLE
31244b804c85SRasmus Villemoes struct cpumask __cpu_possible_mask __read_mostly
3125c4c54dd1SRasmus Villemoes = {CPU_BITS_ALL};
3126b3199c02SRusty Russell #else
31274b804c85SRasmus Villemoes struct cpumask __cpu_possible_mask __read_mostly;
3128b3199c02SRusty Russell #endif
31294b804c85SRasmus Villemoes EXPORT_SYMBOL(__cpu_possible_mask);
3130b3199c02SRusty Russell
31314b804c85SRasmus Villemoes struct cpumask __cpu_online_mask __read_mostly;
31324b804c85SRasmus Villemoes EXPORT_SYMBOL(__cpu_online_mask);
3133b3199c02SRusty Russell
31344b804c85SRasmus Villemoes struct cpumask __cpu_present_mask __read_mostly;
31354b804c85SRasmus Villemoes EXPORT_SYMBOL(__cpu_present_mask);
3136b3199c02SRusty Russell
31374b804c85SRasmus Villemoes struct cpumask __cpu_active_mask __read_mostly;
31384b804c85SRasmus Villemoes EXPORT_SYMBOL(__cpu_active_mask);
31393fa41520SRusty Russell
3140e40f74c5SPeter Zijlstra struct cpumask __cpu_dying_mask __read_mostly;
3141e40f74c5SPeter Zijlstra EXPORT_SYMBOL(__cpu_dying_mask);
3142e40f74c5SPeter Zijlstra
31430c09ab96SThomas Gleixner atomic_t __num_online_cpus __read_mostly;
31440c09ab96SThomas Gleixner EXPORT_SYMBOL(__num_online_cpus);
31450c09ab96SThomas Gleixner
31463fa41520SRusty Russell void init_cpu_present(const struct cpumask *src)
31473fa41520SRusty Russell {
3148c4c54dd1SRasmus Villemoes cpumask_copy(&__cpu_present_mask, src);
31493fa41520SRusty Russell }
31503fa41520SRusty Russell
31513fa41520SRusty Russell void init_cpu_possible(const struct cpumask *src)
31523fa41520SRusty Russell {
3153c4c54dd1SRasmus Villemoes cpumask_copy(&__cpu_possible_mask, src);
31543fa41520SRusty Russell }
31553fa41520SRusty Russell
31563fa41520SRusty Russell void init_cpu_online(const struct cpumask *src)
31573fa41520SRusty Russell {
3158c4c54dd1SRasmus Villemoes cpumask_copy(&__cpu_online_mask, src);
31593fa41520SRusty Russell }
3160cff7d378SThomas Gleixner
31610c09ab96SThomas Gleixner void set_cpu_online(unsigned int cpu, bool online)
31620c09ab96SThomas Gleixner {
31630c09ab96SThomas Gleixner /*
31640c09ab96SThomas Gleixner * atomic_inc/dec() is required to handle the horrid abuse of this
31650c09ab96SThomas Gleixner * function by the reboot and kexec code which invoke it from
31660c09ab96SThomas Gleixner * IPI/NMI broadcasts when shutting down CPUs. Invocation from
31670c09ab96SThomas Gleixner * regular CPU hotplug is properly serialized.
31680c09ab96SThomas Gleixner *
31690c09ab96SThomas Gleixner * Note that __num_online_cpus being of type atomic_t does not
31700c09ab96SThomas Gleixner * protect readers that are not serialized against concurrent
31710c09ab96SThomas Gleixner * hotplug operations.
31720c09ab96SThomas Gleixner */
31730c09ab96SThomas Gleixner if (online) {
31740c09ab96SThomas Gleixner if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
31750c09ab96SThomas Gleixner atomic_inc(&__num_online_cpus);
31760c09ab96SThomas Gleixner } else {
31770c09ab96SThomas Gleixner if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
31780c09ab96SThomas Gleixner atomic_dec(&__num_online_cpus);
31790c09ab96SThomas Gleixner }
31800c09ab96SThomas Gleixner }
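/*
 * Usage sketch (illustrative only): readers that need a stable view of the
 * online mask must serialize against hotplug themselves, e.g. by holding
 * the hotplug read lock:
 *
 *	cpus_read_lock();
 *	if (cpu_online(cpu))
 *		do_something(cpu);	// hypothetical; cpu stays online here
 *	cpus_read_unlock();
 *
 * Lockless readers such as num_online_cpus() only observe a snapshot.
 */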
31810c09ab96SThomas Gleixner
3182cff7d378SThomas Gleixner /*
3183cff7d378SThomas Gleixner * Activate the first processor.
3184cff7d378SThomas Gleixner */
3185cff7d378SThomas Gleixner void __init boot_cpu_init(void)
3186cff7d378SThomas Gleixner {
3187cff7d378SThomas Gleixner int cpu = smp_processor_id();
3188cff7d378SThomas Gleixner
3189cff7d378SThomas Gleixner /* Mark the boot CPU "present", "online" etc. for both the SMP and UP case */
3190cff7d378SThomas Gleixner set_cpu_online(cpu, true);
3191cff7d378SThomas Gleixner set_cpu_active(cpu, true);
3192cff7d378SThomas Gleixner set_cpu_present(cpu, true);
3193cff7d378SThomas Gleixner set_cpu_possible(cpu, true);
31948ce371f9SPeter Zijlstra
31958ce371f9SPeter Zijlstra #ifdef CONFIG_SMP
31968ce371f9SPeter Zijlstra __boot_cpu_id = cpu;
31978ce371f9SPeter Zijlstra #endif
3198cff7d378SThomas Gleixner }
3199cff7d378SThomas Gleixner
3200cff7d378SThomas Gleixner /*
3201cff7d378SThomas Gleixner * Must be called _AFTER_ setting up the per_cpu areas
3202cff7d378SThomas Gleixner */
3203b5b1404dSLinus Torvalds void __init boot_cpu_hotplug_init(void)
3204cff7d378SThomas Gleixner {
3205269777aaSAbel Vesa #ifdef CONFIG_SMP
3206e797bda3SThomas Gleixner cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
32076f062123SThomas Gleixner atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
3208269777aaSAbel Vesa #endif
32090cc3cd21SThomas Gleixner this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
3210d385febcSPhil Auld this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
3211cff7d378SThomas Gleixner }
321298af8452SJosh Poimboeuf
3213976b74faSSean Christopherson #ifdef CONFIG_CPU_MITIGATIONS
3214731dc9dfSTyler Hicks /*
3215731dc9dfSTyler Hicks * These are used for a global "mitigations=" cmdline option for toggling
3216731dc9dfSTyler Hicks * optional CPU mitigations.
3217731dc9dfSTyler Hicks */
3218731dc9dfSTyler Hicks enum cpu_mitigations {
3219731dc9dfSTyler Hicks CPU_MITIGATIONS_OFF,
3220731dc9dfSTyler Hicks CPU_MITIGATIONS_AUTO,
3221731dc9dfSTyler Hicks CPU_MITIGATIONS_AUTO_NOSMT,
3222731dc9dfSTyler Hicks };
3223731dc9dfSTyler Hicks
3224976b74faSSean Christopherson static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
322598af8452SJosh Poimboeuf
322698af8452SJosh Poimboeuf static int __init mitigations_parse_cmdline(char *arg)
322798af8452SJosh Poimboeuf {
322898af8452SJosh Poimboeuf if (!strcmp(arg, "off"))
322998af8452SJosh Poimboeuf cpu_mitigations = CPU_MITIGATIONS_OFF;
323098af8452SJosh Poimboeuf else if (!strcmp(arg, "auto"))
323198af8452SJosh Poimboeuf cpu_mitigations = CPU_MITIGATIONS_AUTO;
323298af8452SJosh Poimboeuf else if (!strcmp(arg, "auto,nosmt"))
323398af8452SJosh Poimboeuf cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
32341bf72720SGeert Uytterhoeven else
32351bf72720SGeert Uytterhoeven pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
32361bf72720SGeert Uytterhoeven arg);
323798af8452SJosh Poimboeuf
323898af8452SJosh Poimboeuf return 0;
323998af8452SJosh Poimboeuf }
3240731dc9dfSTyler Hicks
3241731dc9dfSTyler Hicks /* mitigations=off */
3242731dc9dfSTyler Hicks bool cpu_mitigations_off(void)
3243731dc9dfSTyler Hicks {
3244731dc9dfSTyler Hicks return cpu_mitigations == CPU_MITIGATIONS_OFF;
3245731dc9dfSTyler Hicks }
3246731dc9dfSTyler Hicks EXPORT_SYMBOL_GPL(cpu_mitigations_off);
3247731dc9dfSTyler Hicks
3248731dc9dfSTyler Hicks /* mitigations=auto,nosmt */
3249731dc9dfSTyler Hicks bool cpu_mitigations_auto_nosmt(void)
3250731dc9dfSTyler Hicks {
3251731dc9dfSTyler Hicks return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
3252731dc9dfSTyler Hicks }
3253731dc9dfSTyler Hicks EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
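/*
 * Illustrative sketch of how arch code typically consults these helpers
 * (enable_my_mitigation() and disable_smt_as_well() are hypothetical names,
 * used only to show the decision flow):
 *
 *	if (cpu_mitigations_off())
 *		return;				// admin opted out of mitigations
 *	enable_my_mitigation();			// hypothetical arch helper
 *	if (cpu_mitigations_auto_nosmt())
 *		disable_smt_as_well();		// hypothetical arch helper
 */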
3254976b74faSSean Christopherson #else
3255976b74faSSean Christopherson static int __init mitigations_parse_cmdline(char *arg)
3256976b74faSSean Christopherson {
3257976b74faSSean Christopherson pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
3258976b74faSSean Christopherson return 0;
3259976b74faSSean Christopherson }
3260976b74faSSean Christopherson #endif
3261976b74faSSean Christopherson early_param("mitigations", mitigations_parse_cmdline);
3262