/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};
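/*
 * States of a hotplug thread, tracked in smpboot_thread_data->status
 * and driven by smpboot_thread_fn() below:
 *
 * HP_THREAD_NONE:	thread created (parked), ht->setup() has not
 *			run yet
 * HP_THREAD_ACTIVE:	ht->setup() or ht->unpark() has run on the
 *			target cpu; ht->thread_fn() runs whenever
 *			ht->thread_should_run() returns true
 * HP_THREAD_PARKED:	ht->park() has run and the thread sleeps in
 *			kthread_parkme() until unparked or stopped
 */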
enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is stopped via kthread_stop(); it does
 * not return otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		case HP_THREAD_PARKED:
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable();
			schedule();
		} else {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
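/*
 * Example (illustrative sketch): the minimal callback pair the loop
 * above expects from a client. The foo_* names and the per-cpu
 * pending flag are hypothetical; see kernel/softirq.c or
 * kernel/watchdog.c for real users. Note that thread_should_run() is
 * called with preemption disabled and must not sleep, while
 * thread_fn() is called with preemption enabled and may sleep.
 *
 *	static DEFINE_PER_CPU(unsigned int, foo_pending);
 *
 *	static int foo_should_run(unsigned int cpu)
 *	{
 *		return __this_cpu_read(foo_pending);
 *	}
 *
 *	static void foo_thread_fn(unsigned int cpu)
 *	{
 *		__this_cpu_write(foo_pending, 0);
 *		do_foo_work(cpu);
 *	}
 */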
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (ht->pre_unpark)
		ht->pre_unpark(cpu);
	kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
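/*
 * Usage example (sketch): ksoftirqd in kernel/softirq.c registers
 * itself through this interface roughly as follows. The descriptor
 * must stay alive until it is unregistered, and ->store must point
 * to a per-cpu task_struct pointer.
 *
 *	static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 *
 *	static struct smp_hotplug_thread softirq_threads = {
 *		.store			= &ksoftirqd,
 *		.thread_should_run	= ksoftirqd_should_run,
 *		.thread_fn		= run_ksoftirqd,
 *		.thread_comm		= "ksoftirqd/%u",
 *	};
 *
 *	static __init int spawn_ksoftirqd(void)
 *	{
 *		BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 *		return 0;
 *	}
 *	early_initcall(spawn_ksoftirqd);
 */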