/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kfree(to_kthread(k));
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

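/*
 * Example (sketch, not used by this file): a typical thread function built
 * on the helpers above.  It loops until kthread_stop() is called and parks
 * itself whenever kthread_park() is requested.  my_threadfn, my_dev and
 * my_do_work() are illustrative placeholders, not kernel APIs.
 *
 *	static int my_threadfn(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *
 *			my_do_work(dev);
 *
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			if (!kthread_should_stop() && !kthread_should_park())
 *				schedule();
 *			__set_current_state(TASK_RUNNING);
 *		}
 *		return 0;
 *	}
 */
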
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kmalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->flags = 0;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

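/*
 * Example (sketch): __kthread_create_on_node() above is the common path
 * behind kthread_create() and kthread_run().  A typical caller checks the
 * returned pointer with IS_ERR() before waking the new thread; my_threadfn,
 * my_data and id are illustrative placeholders.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(my_threadfn, my_data, "my_thread/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);	(kthread_run() folds these two steps into one)
 */
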
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

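/*
 * Example (sketch): bind a freshly created, still stopped thread to a CPU
 * before waking it; cpu, my_threadfn and my_data are illustrative
 * placeholders.
 *
 *	tsk = kthread_create_on_node(my_threadfn, my_data,
 *				     cpu_to_node(cpu), "my_thread/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */
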
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread.  Format is restricted
 *	     to "name.*%u".  Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it.
 * If the thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		if (k != current) {
			wake_up_process(k);
			wait_for_completion(&kthread->parked);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

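/*
 * Example (sketch): a hotplug-style controller parks its per-cpu thread
 * while the CPU goes down and unparks it when the CPU comes back;
 * percpu_tsk is an illustrative placeholder (the smpboot infrastructure
 * provides this pattern for real users).
 *
 *	kthread_park(percpu_tsk);	before the CPU goes offline
 *	...
 *	kthread_unpark(percpu_tsk);	rebinds (if per-cpu) and resumes threadfn
 */
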
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

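/*
 * Example (sketch): the "open coded" worker setup that the FIXME below
 * refers to - initialize a kthread_worker and run kthread_worker_fn() in a
 * thread created by the caller; kthread_worker_fn() then sets worker->task
 * itself.  my_worker and my_task are illustrative placeholders.
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *my_task;
 *
 *	my_task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *
 * New code should normally prefer kthread_create_worker() below.
 */
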
/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main loop of a kthread worker. It processes
 * the work_list until it is stopped with kthread_stop(). It sleeps when the
 * queue is empty.
 *
 * Work items must not hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after
 * one work finishes and before the next one is started.
 *
 * Also, a work must not be handled by more than one worker at the same time;
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

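/*
 * Example (sketch): create a dedicated worker and handle the error pointer;
 * my_worker is an illustrative placeholder.
 *
 *	struct kthread_worker *my_worker;
 *
 *	my_worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(my_worker))
 *		return PTR_ERR(my_worker);
 */
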
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to also include the cpu number in the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * created with kthread_create_worker() or set up with kthread_init_worker().
 * Returns %true if @work was successfully queued, %false if it was
 * already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

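/*
 * Example (sketch): embed a kthread_work in an object, point it at a handler
 * and queue it on a worker; my_obj, my_handler and my_worker are
 * illustrative placeholders.
 *
 *	struct my_obj {
 *		struct kthread_work work;
 *	};
 *
 *	static void my_handler(struct kthread_work *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, work);
 *		(process obj)
 *	}
 *
 *	kthread_init_work(&obj->work, my_handler);
 *	kthread_queue_work(my_worker, &obj->work);
 */
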
/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irqsafe timer with IRQs already disabled.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
	struct kthread_delayed_work *dwork =
		(struct kthread_delayed_work *)__data;
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if @work was already pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

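/*
 * Example (sketch): the delayed variant of the pattern above - arm the work
 * to run on the worker roughly one second from now; my_handler and
 * my_worker are illustrative placeholders.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, my_handler);
 *	kthread_queue_delayed_work(my_worker, &dwork, HZ);
 */
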
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

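/*
 * Example (sketch): wait for a previously queued work to finish before
 * touching the object it is embedded in; obj and my_worker are illustrative
 * placeholders.
 *
 *	kthread_queue_work(my_worker, &obj->work);
 *	...
 *	kthread_flush_work(&obj->work);		returns once the handler is done
 */
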
/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handlers.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

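/*
 * Example (sketch): a debounce pattern - every trigger (possibly from IRQ
 * context) pushes the delayed work further out, and teardown cancels it
 * synchronously; dwork and my_worker are illustrative placeholders.
 *
 *	trigger:
 *		kthread_mod_delayed_work(my_worker, &dwork,
 *					 msecs_to_jiffies(100));
 *	teardown:
 *		kthread_cancel_delayed_work_sync(&dwork);
 */
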
/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
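
/*
 * Example (sketch): full shutdown of a worker created with
 * kthread_create_worker() - cancel outstanding delayed works, then destroy
 * the worker, which flushes it, stops the thread and frees the structure;
 * dwork and my_worker are illustrative placeholders.
 *
 *	kthread_cancel_delayed_work_sync(&dwork);
 *	kthread_destroy_worker(my_worker);
 */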