// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	char *full_name;
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->worker_private
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	struct kthread *kthread = to_kthread(tsk);

	if (!kthread || !kthread->full_name) {
		__get_task_comm(buf, buf_size, tsk);
		return;
	}

	strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (WARN_ON_ONCE(to_kthread(p)))
		return false;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	if (!kthread)
		return false;

	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	p->vfork_done = &kthread->exited;

	p->worker_private = kthread;
	return true;
}
void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
	 */
	kthread = to_kthread(k);
	if (!kthread)
		return;

#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread->blkcg_css);
#endif
	k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
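/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * typical main loop of a stoppable kthread. The names example_thread_fn
 * and example_do_work are hypothetical.
 *
 *	static int example_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;	(this value is returned by kthread_stop())
 *	}
 */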
static bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

bool kthread_should_stop_or_park(void)
{
	struct kthread *kthread = __to_kthread(current);

	if (!kthread)
		return false;

	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);

	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
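/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * thread function that honours both stop and park requests. The name
 * example_parkable_fn is hypothetical.
 *
 *	static int example_parkable_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				example_do_work(data);
 *		}
 *		return 0;
 *	}
 */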
/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);
	kthread->result = result;
	do_exit(0);
}

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return @code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);

static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when caller killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->full_name = create->full_name;
	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}

/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when caller killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		kfree(create->full_name);
		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
		goto free_create;
	}

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or new
		 * kernel thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
free_create:
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument.  @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
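/*
 * Usage sketch (editor's illustration, not part of the original file):
 * creating, starting and stopping a kthread. example_thread_fn is assumed
 * to be a stop-aware thread function as in the earlier sketches.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(example_thread_fn, my_data, NUMA_NO_NODE,
 *				   "example/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 *	...
 *	ret = kthread_stop(t);
 */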
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
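/*
 * Usage sketch (editor's illustration, not part of the original file):
 * creating a CPU-bound thread. Note the "%u" in the name format, which
 * kthread_create_on_cpu() fills in with the CPU number.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_cpu(example_thread_fn, my_data, cpu,
 *				  "example/%u");
 *	if (!IS_ERR(t))
 *		wake_up_process(t);
 */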
void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);

	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it.
 * If the thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
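/*
 * Usage sketch (editor's illustration, not part of the original file):
 * quiescing a thread from the controller side, e.g. around a
 * reconfiguration. The thread function is assumed to call
 * kthread_parkme() as in the parkable-loop sketch above.
 *
 *	if (!kthread_park(t)) {
 *		example_reconfigure(my_data);
 *		kthread_unpark(t);
 *	}
 */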
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

/**
 * kthread_stop_put - stop a thread and put its task struct
 * @k: thread created by kthread_create().
 *
 * Stops a thread created by kthread_create() and puts its task_struct.
 * Only use when holding an extra task struct reference obtained by
 * calling get_task_struct().
 */
int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);
	put_task_struct(k);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_put);
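/*
 * Usage sketch (editor's illustration, not part of the original file):
 * stopping a thread whose task_struct reference is held explicitly, e.g.
 * because threadfn() may call kthread_exit() on its own.
 *
 *	get_task_struct(t);
 *	...
 *	ret = kthread_stop_put(t);	(drops the reference taken above)
 */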
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. A safe point for freezing is provided after one work
 * finishes and before the next one is started.
 *
 * Also, a work must not be handled by more than one worker at the same time;
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current)) {
		schedule();
	} else {
		/*
		 * Handle the case where the current remains
		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
		 * the current to be TASK_RUNNING.
		 */
		__set_current_state(TASK_RUNNING);
	}

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
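/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * complete kthread_worker round trip. The handler my_work_fn and the
 * struct names are hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *		...
 *	}
 *
 *	struct kthread_worker *w;
 *
 *	w = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(w, &ctx->work);
 *	...
 *	kthread_destroy_worker(w);	(flushes and stops the worker)
 */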
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *	created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up; one possible
 * shape is sketched after this function.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);
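/*
 * Hotplug sketch (editor's illustration, not part of the original file):
 * one way to follow the "two CPU hotplug callbacks" advice above using the
 * cpuhp API. All names except the cpuhp and kthread functions are
 * hypothetical, and error handling is elided.
 *
 *	static struct kthread_worker *my_workers[NR_CPUS];
 *
 *	static int my_cpu_online(unsigned int cpu)
 *	{
 *		struct kthread_worker *w;
 *
 *		w = kthread_create_worker_on_cpu(cpu, 0, "my_helper/%u", cpu);
 *		if (IS_ERR(w))
 *			return PTR_ERR(w);
 *		my_workers[cpu] = w;
 *		return 0;
 *	}
 *
 *	static int my_cpu_offline(unsigned int cpu)
 *	{
 *		kthread_destroy_worker(my_workers[cpu]);
 *		my_workers[cpu] = NULL;
 *		return 0;
 *	}
 *
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my:online",
 *			  my_cpu_online, my_cpu_offline);
 */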
/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irqsafe timer with irqs already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that the work is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending, it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if @work was already pending, meaning that either its
 * timer was running or the work was already queued; %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
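/*
 * Usage sketch (editor's illustration, not part of the original file):
 * arming a delayed work that re-queues itself. The names are hypothetical.
 *
 *	kthread_init_delayed_work(&ctx->dwork, my_dwork_fn);
 *	kthread_queue_delayed_work(worker, &ctx->dwork,
 *				   msecs_to_jiffies(500));
 *
 * and inside my_dwork_fn(), to run again half a second later:
 *
 *	kthread_queue_delayed_work(worker, &ctx->dwork,
 *				   msecs_to_jiffies(500));
 */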
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller is responsible for queuing or canceling all delayed work items
 * before invoking this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->delayed_work_list));
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
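/*
 * Teardown sketch (editor's illustration, not part of the original file):
 * the shutdown order implied by the note above, with delayed work canceled
 * before the worker is destroyed.
 *
 *	kthread_cancel_delayed_work_sync(&ctx->dwork);
 *	kthread_cancel_work_sync(&ctx->work);
 *	kthread_destroy_worker(worker);
 */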
/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	/*
	 * It is possible for mm to be the same as tsk->active_mm, but
	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
	 * because these references are not equivalent.
	 */
	mmgrab(mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * the new tsk->mm value and thus not issue an IPI. Membarrier
	 * requires a memory barrier after storing to tsk->mm, before
	 * accessing user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop_lazy_tlb().
	 */
	mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm has been cleared and thus not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	mmgrab_lazy_tlb(mm);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
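/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * kthread temporarily borrowing a user address space to access user
 * memory, as done by e.g. vhost-style offload threads. The mm pointer is
 * assumed to be pinned by the caller.
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(&val, uptr, sizeof(val)))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */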
#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to use the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif