1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
238498a67SThomas Gleixner /*
338498a67SThomas Gleixner * Common SMP CPU bringup/teardown functions
438498a67SThomas Gleixner */
5f97f8f06SThomas Gleixner #include <linux/cpu.h>
629d5e047SThomas Gleixner #include <linux/err.h>
729d5e047SThomas Gleixner #include <linux/smp.h>
88038dad7SPaul E. McKenney #include <linux/delay.h>
938498a67SThomas Gleixner #include <linux/init.h>
10f97f8f06SThomas Gleixner #include <linux/list.h>
11f97f8f06SThomas Gleixner #include <linux/slab.h>
1229d5e047SThomas Gleixner #include <linux/sched.h>
1329930025SIngo Molnar #include <linux/sched/task.h>
14f97f8f06SThomas Gleixner #include <linux/export.h>
1529d5e047SThomas Gleixner #include <linux/percpu.h>
16f97f8f06SThomas Gleixner #include <linux/kthread.h>
17f97f8f06SThomas Gleixner #include <linux/smpboot.h>
1838498a67SThomas Gleixner
1938498a67SThomas Gleixner #include "smpboot.h"
2038498a67SThomas Gleixner
213180d89bSPaul E. McKenney #ifdef CONFIG_SMP
223180d89bSPaul E. McKenney
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them: a CPU that goes offline keeps its idle task cached here so
 * the next onlining does not have to fork a new one.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);
2929d5e047SThomas Gleixner
idle_thread_get(unsigned int cpu)300db0628dSPaul Gortmaker struct task_struct *idle_thread_get(unsigned int cpu)
3129d5e047SThomas Gleixner {
3229d5e047SThomas Gleixner struct task_struct *tsk = per_cpu(idle_threads, cpu);
3329d5e047SThomas Gleixner
3429d5e047SThomas Gleixner if (!tsk)
353bb5d2eeSSuresh Siddha return ERR_PTR(-ENOMEM);
3629d5e047SThomas Gleixner return tsk;
3729d5e047SThomas Gleixner }
3829d5e047SThomas Gleixner
/*
 * Record the boot CPU's current task as its idle thread. The boot CPU
 * never forks an idle task; the task running this init code is it.
 */
void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}
4329d5e047SThomas Gleixner
444a70d2d9SSrivatsa S. Bhat /**
454a70d2d9SSrivatsa S. Bhat * idle_init - Initialize the idle thread for a cpu
464a70d2d9SSrivatsa S. Bhat * @cpu: The cpu for which the idle thread should be initialized
474a70d2d9SSrivatsa S. Bhat *
484a70d2d9SSrivatsa S. Bhat * Creates the thread if it does not exist.
494a70d2d9SSrivatsa S. Bhat */
idle_init(unsigned int cpu)50a1833a54SLinus Torvalds static __always_inline void idle_init(unsigned int cpu)
513bb5d2eeSSuresh Siddha {
523bb5d2eeSSuresh Siddha struct task_struct *tsk = per_cpu(idle_threads, cpu);
533bb5d2eeSSuresh Siddha
543bb5d2eeSSuresh Siddha if (!tsk) {
553bb5d2eeSSuresh Siddha tsk = fork_idle(cpu);
563bb5d2eeSSuresh Siddha if (IS_ERR(tsk))
573bb5d2eeSSuresh Siddha pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
583bb5d2eeSSuresh Siddha else
593bb5d2eeSSuresh Siddha per_cpu(idle_threads, cpu) = tsk;
603bb5d2eeSSuresh Siddha }
613bb5d2eeSSuresh Siddha }
623bb5d2eeSSuresh Siddha
6329d5e047SThomas Gleixner /**
644a70d2d9SSrivatsa S. Bhat * idle_threads_init - Initialize idle threads for all cpus
6529d5e047SThomas Gleixner */
idle_threads_init(void)663bb5d2eeSSuresh Siddha void __init idle_threads_init(void)
6729d5e047SThomas Gleixner {
68ee74d132SSrivatsa S. Bhat unsigned int cpu, boot_cpu;
69ee74d132SSrivatsa S. Bhat
70ee74d132SSrivatsa S. Bhat boot_cpu = smp_processor_id();
7129d5e047SThomas Gleixner
723bb5d2eeSSuresh Siddha for_each_possible_cpu(cpu) {
73ee74d132SSrivatsa S. Bhat if (cpu != boot_cpu)
743bb5d2eeSSuresh Siddha idle_init(cpu);
7529d5e047SThomas Gleixner }
7629d5e047SThomas Gleixner }
7729d5e047SThomas Gleixner #endif
78f97f8f06SThomas Gleixner
793180d89bSPaul E. McKenney #endif /* #ifdef CONFIG_SMP */
803180d89bSPaul E. McKenney
/* All registered hotplug thread descriptors, protected by smpboot_threads_lock. */
static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);
83f97f8f06SThomas Gleixner
/* Per-instance bookkeeping handed to smpboot_thread_fn() as its kthread data. */
struct smpboot_thread_data {
	unsigned int		cpu;	/* CPU this thread instance is bound to */
	unsigned int		status;	/* HP_THREAD_* lifecycle state */
	struct smp_hotplug_thread	*ht;	/* the registered descriptor */
};
89f97f8f06SThomas Gleixner
/* Lifecycle states tracked in smpboot_thread_data::status. */
enum {
	HP_THREAD_NONE = 0,	/* ht->setup() has not run yet */
	HP_THREAD_ACTIVE,	/* setup/unpark done, thread operational */
	HP_THREAD_PARKED,	/* ht->park() done, thread is parked */
};
95f97f8f06SThomas Gleixner
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when told to stop; otherwise it never returns.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		/*
		 * Mark ourselves sleeping before testing the stop/park/work
		 * conditions so a wakeup arriving in between is not lost:
		 * it flips us back to TASK_RUNNING and schedule() returns
		 * immediately.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			/* td was allocated by __smpboot_create_thread() */
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			/* First run on this CPU: run the setup callback once. */
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			/* Coming back from a park cycle: undo ht->park(). */
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			/* No work: go to sleep; schedule() follows directly. */
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
168f97f8f06SThomas Gleixner
/*
 * Create the hotplug thread of @ht for @cpu, unless it already exists.
 *
 * The thread is created in parked state and a task reference is taken,
 * matching the kthread_stop_put() in smpboot_destroy_threads().
 *
 * Returns 0 on success (or if the thread already existed), -ENOMEM or
 * the kthread_create_on_cpu() error code on failure.
 */
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	/* Freed by smpboot_thread_fn() when the thread stops. */
	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	kthread_set_per_cpu(tsk, cpu);
	/*
	 * Park the thread so that it could start right on the CPU
	 * when it is available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}
212f97f8f06SThomas Gleixner
smpboot_create_threads(unsigned int cpu)213f97f8f06SThomas Gleixner int smpboot_create_threads(unsigned int cpu)
214f97f8f06SThomas Gleixner {
215f97f8f06SThomas Gleixner struct smp_hotplug_thread *cur;
216f97f8f06SThomas Gleixner int ret = 0;
217f97f8f06SThomas Gleixner
218f97f8f06SThomas Gleixner mutex_lock(&smpboot_threads_lock);
219f97f8f06SThomas Gleixner list_for_each_entry(cur, &hotplug_threads, list) {
220f97f8f06SThomas Gleixner ret = __smpboot_create_thread(cur, cpu);
221f97f8f06SThomas Gleixner if (ret)
222f97f8f06SThomas Gleixner break;
223f97f8f06SThomas Gleixner }
224f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
225f97f8f06SThomas Gleixner return ret;
226f97f8f06SThomas Gleixner }
227f97f8f06SThomas Gleixner
smpboot_unpark_thread(struct smp_hotplug_thread * ht,unsigned int cpu)228f97f8f06SThomas Gleixner static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
229f97f8f06SThomas Gleixner {
230f97f8f06SThomas Gleixner struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
231f97f8f06SThomas Gleixner
232c00166d8SOleg Nesterov if (!ht->selfparking)
233f97f8f06SThomas Gleixner kthread_unpark(tsk);
234f97f8f06SThomas Gleixner }
235f97f8f06SThomas Gleixner
smpboot_unpark_threads(unsigned int cpu)236931ef163SThomas Gleixner int smpboot_unpark_threads(unsigned int cpu)
237f97f8f06SThomas Gleixner {
238f97f8f06SThomas Gleixner struct smp_hotplug_thread *cur;
239f97f8f06SThomas Gleixner
240f97f8f06SThomas Gleixner mutex_lock(&smpboot_threads_lock);
241f97f8f06SThomas Gleixner list_for_each_entry(cur, &hotplug_threads, list)
242f97f8f06SThomas Gleixner smpboot_unpark_thread(cur, cpu);
243f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
244931ef163SThomas Gleixner return 0;
245f97f8f06SThomas Gleixner }
246f97f8f06SThomas Gleixner
smpboot_park_thread(struct smp_hotplug_thread * ht,unsigned int cpu)247f97f8f06SThomas Gleixner static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
248f97f8f06SThomas Gleixner {
249f97f8f06SThomas Gleixner struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
250f97f8f06SThomas Gleixner
2517d7e499fSThomas Gleixner if (tsk && !ht->selfparking)
252f97f8f06SThomas Gleixner kthread_park(tsk);
253f97f8f06SThomas Gleixner }
254f97f8f06SThomas Gleixner
smpboot_park_threads(unsigned int cpu)255931ef163SThomas Gleixner int smpboot_park_threads(unsigned int cpu)
256f97f8f06SThomas Gleixner {
257f97f8f06SThomas Gleixner struct smp_hotplug_thread *cur;
258f97f8f06SThomas Gleixner
259f97f8f06SThomas Gleixner mutex_lock(&smpboot_threads_lock);
260f97f8f06SThomas Gleixner list_for_each_entry_reverse(cur, &hotplug_threads, list)
261f97f8f06SThomas Gleixner smpboot_park_thread(cur, cpu);
262f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
263931ef163SThomas Gleixner return 0;
264f97f8f06SThomas Gleixner }
265f97f8f06SThomas Gleixner
smpboot_destroy_threads(struct smp_hotplug_thread * ht)266f97f8f06SThomas Gleixner static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
267f97f8f06SThomas Gleixner {
268f97f8f06SThomas Gleixner unsigned int cpu;
269f97f8f06SThomas Gleixner
270f97f8f06SThomas Gleixner /* We need to destroy also the parked threads of offline cpus */
271f97f8f06SThomas Gleixner for_each_possible_cpu(cpu) {
272f97f8f06SThomas Gleixner struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
273f97f8f06SThomas Gleixner
274f97f8f06SThomas Gleixner if (tsk) {
275*a9da6ddaSAndreas Gruenbacher kthread_stop_put(tsk);
276f97f8f06SThomas Gleixner *per_cpu_ptr(ht->store, cpu) = NULL;
277f97f8f06SThomas Gleixner }
278f97f8f06SThomas Gleixner }
279f97f8f06SThomas Gleixner }
280f97f8f06SThomas Gleixner
/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 *
 * Returns 0 on success, or the error from __smpboot_create_thread(); on
 * failure all instances created so far are torn down again.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	/* Keep the set of online CPUs stable while creating/unparking. */
	cpus_read_lock();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			/* Roll back the instances already created. */
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	/* Only a fully instantiated descriptor goes on the list. */
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
310f97f8f06SThomas Gleixner
/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	/* Same lock order as registration: hotplug lock, then list mutex. */
	cpus_read_lock();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
327