/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};
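
/*
 * Per-thread state machine, driven by smpboot_thread_fn() below:
 *
 *	HP_THREAD_NONE   -> HP_THREAD_ACTIVE	ht->setup() on the first run
 *	HP_THREAD_ACTIVE -> HP_THREAD_PARKED	ht->park() before kthread_parkme()
 *	HP_THREAD_PARKED -> HP_THREAD_ACTIVE	ht->unpark() after being unparked
 *
 * All transitions happen in the context of the per-cpu thread itself,
 * on its own cpu (see the BUG_ON() checks below).
 */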

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread should exit (stop); does not return
 * otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
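
/*
 * Creation protocol: kthread_create_on_cpu() hands back the new task
 * already bound to @cpu and parked, so it consumes no cpu time until the
 * cpu is brought online and smpboot_unpark_threads() unparks it. The
 * ht->create() callback is only invoked once the task has actually
 * scheduled out into TASK_PARKED (see the wait_task_inactive() check
 * below).
 */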
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (ht->pre_unpark)
		ht->pre_unpark(cpu);
	kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We also need to destroy the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
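
/*
 * Usage sketch (illustrative only; all "example" identifiers below are
 * hypothetical, modeled on how ksoftirqd registers itself):
 *
 *	static DEFINE_PER_CPU(struct task_struct *, example_task);
 *
 *	static int example_should_run(unsigned int cpu)
 *	{
 *		return example_has_work(cpu);	- hypothetical helper
 *	}
 *
 *	static void example_thread_fn(unsigned int cpu)
 *	{
 *		do_example_work(cpu);		- hypothetical helper
 *	}
 *
 *	static struct smp_hotplug_thread example_threads = {
 *		.store			= &example_task,
 *		.thread_should_run	= example_should_run,
 *		.thread_fn		= example_thread_fn,
 *		.thread_comm		= "example/%u",
 *	};
 *
 * smpboot_register_percpu_thread(&example_threads) then creates and
 * unparks one "example/N" thread per online cpu; the hotplug core parks
 * and unparks them as cpus go down and come back up. A matching
 * smpboot_unregister_percpu_thread(&example_threads) stops and frees
 * all of them, including parked threads of offline cpus.
 */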