// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
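
/*
 * Illustrative sketch only (hypothetical names, never compiled here): a
 * minimal client of the stop protocol documented above. The thread loops
 * until kthread_stop() sets the SHOULD_STOP bit and wakes it; its return
 * value is handed back to kthread_stop().
 */
#if 0
static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* Do one unit of work, then sleep until woken again. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;
}

static int example_start_stop(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(example_threadfn, NULL, "example");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	/* ... later ... */
	return kthread_stop(tsk);	/* returns example_threadfn()'s 0 */
}
#endif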

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	if (task->flags & PF_KTHREAD)
		return to_kthread(task)->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
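
/*
 * Illustrative sketch only (hypothetical names, never compiled here): how a
 * thread function cooperates with kthread_park()/kthread_unpark(). On every
 * iteration the thread checks kthread_should_park() and, if asked to, parks
 * itself via kthread_parkme() until it is unparked or stopped.
 */
#if 0
static int example_parkable_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();

		/* ... per-iteration work goes here ... */
		cond_resched();
	}
	return 0;
}

/* Controller side: quiesce the thread, then let it run again. */
static void example_pause_resume(struct task_struct *tsk)
{
	kthread_park(tsk);	/* returns once the thread is parked */
	/* ... the thread is guaranteed not to be running its loop ... */
	kthread_unpark(tsk);
}
#endif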

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular CPU, give its node
 * in @node to get NUMA affinity for the kthread stack; otherwise, give
 * NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
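
/*
 * Illustrative sketch only (hypothetical names, never compiled here): unlike
 * kthread_run(), kthread_create_on_node() leaves the new thread stopped, so
 * the caller can adjust it and then start it explicitly with
 * wake_up_process(). Passing a NUMA node keeps the stack and task_struct
 * local to the CPU the thread will later be bound to.
 */
#if 0
static struct task_struct *example_create_on_node(int node, void *cookie)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_threadfn, cookie, node,
				     "example/%d", node);
	if (IS_ERR(tsk))
		return tsk;

	/* The thread exists but has not run example_threadfn() yet. */
	wake_up_process(tsk);
	return tsk;
}
#endif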

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to @cpu.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it. If the
 * thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);
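
/*
 * Illustrative sketch only (hypothetical names, never compiled here): a
 * per-CPU thread. kthread_create_on_cpu() allocates on the CPU's NUMA node,
 * fills the CPU number into the name format and binds the thread before it
 * ever runs; the KTHREAD_IS_PER_CPU bit makes kthread_unpark() re-bind it
 * after the CPU went through an offline/online cycle.
 */
#if 0
static struct task_struct *example_start_percpu(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(example_parkable_threadfn, NULL, cpu,
				    "example/%u");
	if (IS_ERR(tsk))
		return tsk;

	wake_up_process(tsk);	/* created stopped, start it explicitly */
	return tsk;
}
#endif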

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);
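
/*
 * Illustrative sketch only (hypothetical names, never compiled here): the
 * "open-coded" worker setup that predates kthread_create_worker(). The caller
 * owns the struct kthread_worker, initializes it with kthread_init_worker()
 * and runs kthread_worker_fn() in a thread of its own; the FIXME in
 * kthread_worker_fn() below refers to exactly this usage.
 */
#if 0
static struct kthread_worker example_worker;

static int example_start_worker(void)
{
	struct task_struct *tsk;

	kthread_init_worker(&example_worker);
	tsk = kthread_run(kthread_worker_fn, &example_worker, "example_worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	return 0;
}
#endif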

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works are not allowed to hold any locks or leave preemption or interrupts
 * disabled when they finish. There is a defined safe point for freezing after
 * one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
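
/*
 * Illustrative sketch only (hypothetical names, never compiled here):
 * creating a dedicated worker with the managed API and tearing it down again.
 * kthread_create_worker() allocates the worker, spawns the backing thread and
 * wakes it; kthread_destroy_worker() (defined later in this file) flushes the
 * remaining works, stops the thread and frees the worker.
 */
#if 0
static struct kthread_worker *example_worker_setup(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return worker;

	/* ... queue kthread_work items to it, see kthread_queue_work() ... */
	return worker;
}

static void example_worker_teardown(struct kthread_worker *worker)
{
	kthread_destroy_worker(worker);
}
#endif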

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *      created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  @worker must have been
 * created with kthread_create_worker() or initialized with
 * kthread_init_worker().  Returns %true if @work was successfully
 * queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
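
/*
 * Illustrative sketch only (hypothetical names, never compiled here): defining
 * a work item and queuing it. The work function receives the kthread_work
 * pointer and typically recovers its containing object with container_of().
 */
#if 0
struct example_item {
	struct kthread_work work;
	int payload;
};

static void example_work_fn(struct kthread_work *work)
{
	struct example_item *item = container_of(work, struct example_item, work);

	/* runs in the worker thread's context */
	item->payload++;
}

static void example_submit(struct kthread_worker *worker,
			   struct example_item *item)
{
	kthread_init_work(&item->work, example_work_fn);
	kthread_queue_work(worker, &item->work);
}
#endif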

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The prototype of this function is defined by struct timer_list. It is
 * called from an irqsafe timer with IRQs already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
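
/*
 * Illustrative sketch only (hypothetical names, never compiled here): a
 * delayed work item. The timer is armed by kthread_queue_delayed_work(), and
 * kthread_delayed_work_timer_fn() above moves the work onto the worker's
 * work_list once the delay has elapsed.
 */
#if 0
static struct kthread_delayed_work example_dwork;

static void example_delayed_fn(struct kthread_work *work)
{
	/* runs in the worker thread roughly one second after queuing */
}

static void example_queue_delayed(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_delayed_fn);
	kthread_queue_delayed_work(worker, &example_dwork, HZ);
}
#endif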

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}
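
/*
 * Illustrative sketch only (hypothetical names, never compiled here): using
 * kthread_flush_work() to wait for one specific item before its containing
 * object is freed. The flush returns only after any queued or currently
 * executing instance of the work has finished.
 */
#if 0
static void example_release_item(struct example_item *item)
{
	kthread_flush_work(&item->work);
	kfree(item);
}
#endif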

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
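
/*
 * Illustrative sketch only (hypothetical names, never compiled here): a
 * watchdog-style pattern built on kthread_mod_delayed_work(). Every "kick"
 * pushes the expiry out by another timeout; the work function only runs if
 * no kick arrives in time. Safe to call from IRQ context.
 */
#if 0
static void example_watchdog_kick(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long timeout)
{
	/* (Re-)arm the delayed work, whether or not it was already pending. */
	kthread_mod_delayed_work(worker, dwork, timeout);
}
#endif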

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
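
/*
 * Illustrative sketch only (hypothetical names, never compiled here): typical
 * shutdown ordering for the delayed work from the earlier sketches. Cancel the
 * timer and any pending or running instance first, then destroy the worker, so
 * the delayed work's timer cannot fire into a worker that is being torn down.
 */
#if 0
static void example_shutdown(struct kthread_worker *worker)
{
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_destroy_worker(worker);
}
#endif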

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop(), or explicitly with smp_mb().
	 */
	if (active_mm != mm)
		mmdrop(active_mm);
	else
		smp_mb();

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif
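
/*
 * Illustrative sketch only (hypothetical names, never compiled here): a
 * kthread temporarily borrowing a user process's address space, e.g. to read
 * a buffer at a user-space address on that process's behalf. The mm reference
 * is taken with get_task_mm() and dropped with mmput() after switching back.
 */
#if 0
static int example_read_user(struct task_struct *owner,
			     void __user *uaddr, void *buf, size_t len)
{
	struct mm_struct *mm;
	int ret = 0;

	mm = get_task_mm(owner);
	if (!mm)
		return -ESRCH;

	kthread_use_mm(mm);
	if (copy_from_user(buf, uaddr, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);

	mmput(mm);
	return ret;
}
#endif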