/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

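/*
 * Usage sketch (illustrative only, not part of this file): the typical
 * thread function paired with the helpers above.  The loop exits once
 * kthread_stop() is called; the freezable variant below uses
 * kthread_freezable_should_stop() instead of calling try_to_freeze()
 * directly.  my_thread_fn, my_dev and do_one_item() are hypothetical.
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (!do_one_item(dev))
 *				schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
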
static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

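/*
 * Usage sketch (illustrative only, not part of this file): creating a
 * kthread, starting it, and stopping it later.  The kthread_run() macro
 * from <linux/kthread.h> combines the create and wake_up_process() steps;
 * kthread_stop() returns the thread function's return value.  The names
 * my_thread_fn, my_dev and id are hypothetical.
 *
 *	struct task_struct *task;
 *	int ret;
 *
 *	task = kthread_create_on_node(my_thread_fn, my_dev, -1,
 *				      "my_kthread/%d", id);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	wake_up_process(task);
 *	...
 *	ret = kthread_stop(task);
 */
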
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Set up a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

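/*
 * Usage sketch (illustrative only, not part of this file): the usual
 * pattern for a per-CPU kthread - create it stopped with a NUMA-local
 * stack, bind it before the first wakeup, then start it.
 * my_cpu_thread_fn is hypothetical.
 *
 *	struct task_struct *p;
 *
 *	p = kthread_create_on_node(my_cpu_thread_fn, NULL, cpu_to_node(cpu),
 *				   "my_cpu_worker/%u", cpu);
 *	if (!IS_ERR(p)) {
 *		kthread_bind(p, cpu);
 *		wake_up_process(p);
 *	}
 */
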
void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed asynchronously by @worker's kthread.
 * Returns %true if @work was successfully queued, %false if it was
 * already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

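/*
 * Usage sketch (illustrative only, not part of this file): driving a
 * kthread_worker with the helpers above and the init_kthread_worker()/
 * init_kthread_work() macros from <linux/kthread.h>.  In real code the
 * worker and work items would live in a long-lived structure; the names
 * my_work_fn, my_worker, my_work and task are hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		...
 *	}
 *
 *	struct kthread_worker my_worker;
 *	struct kthread_work my_work;
 *	struct task_struct *task;
 *
 *	init_kthread_worker(&my_worker);
 *	init_kthread_work(&my_work, my_work_fn);
 *	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	...
 *	queue_kthread_work(&my_worker, &my_work);
 */
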
struct kthread_flush_work {
	struct kthread_work work;
	struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
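
/*
 * Usage sketch (illustrative only, not part of this file): orderly
 * teardown of a kthread_worker - drain everything already queued, then
 * stop the servicing thread.  my_worker and task are hypothetical and
 * correspond to the earlier worker sketch.
 *
 *	flush_kthread_worker(&my_worker);
 *	kthread_stop(task);
 */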