xref: /openbmc/linux/kernel/kthread.c (revision fb4a5dfc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
4  *   Copyright (C) 2009 Red Hat, Inc.
5  *
6  * Creation is done via kthreadd, so that we get a clean environment
7  * even if we're invoked from userspace (think modprobe, hotplug cpu,
8  * etc.).
9  */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32 
33 
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37 
38 struct kthread_create_info
39 {
40 	/* Information passed to kthread() from kthreadd. */
41 	int (*threadfn)(void *data);
42 	void *data;
43 	int node;
44 
45 	/* Result passed back to kthread_create() from kthreadd. */
46 	struct task_struct *result;
47 	struct completion *done;
48 
49 	struct list_head list;
50 };
51 
52 struct kthread {
53 	unsigned long flags;
54 	unsigned int cpu;
55 	int result;
56 	int (*threadfn)(void *);
57 	void *data;
58 	struct completion parked;
59 	struct completion exited;
60 #ifdef CONFIG_BLK_CGROUP
61 	struct cgroup_subsys_state *blkcg_css;
62 #endif
63 	/* To store the full name if task comm is truncated. */
64 	char *full_name;
65 };
66 
67 enum KTHREAD_BITS {
68 	KTHREAD_IS_PER_CPU = 0,
69 	KTHREAD_SHOULD_STOP,
70 	KTHREAD_SHOULD_PARK,
71 };
72 
73 static inline struct kthread *to_kthread(struct task_struct *k)
74 {
75 	WARN_ON(!(k->flags & PF_KTHREAD));
76 	return k->worker_private;
77 }
78 
79 /*
80  * Variant of to_kthread() that doesn't assume @p is a kthread.
81  *
82  * Per construction; when:
83  *
84  *   (p->flags & PF_KTHREAD) && p->worker_private
85  *
86  * the task is both a kthread and struct kthread is persistent. However
87  * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
88  * begin_new_exec()).
89  */
90 static inline struct kthread *__to_kthread(struct task_struct *p)
91 {
92 	void *kthread = p->worker_private;
93 	if (kthread && !(p->flags & PF_KTHREAD))
94 		kthread = NULL;
95 	return kthread;
96 }
97 
98 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
99 {
100 	struct kthread *kthread = to_kthread(tsk);
101 
102 	if (!kthread || !kthread->full_name) {
103 		__get_task_comm(buf, buf_size, tsk);
104 		return;
105 	}
106 
107 	strscpy_pad(buf, kthread->full_name, buf_size);
108 }
109 
110 bool set_kthread_struct(struct task_struct *p)
111 {
112 	struct kthread *kthread;
113 
114 	if (WARN_ON_ONCE(to_kthread(p)))
115 		return false;
116 
117 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
118 	if (!kthread)
119 		return false;
120 
121 	init_completion(&kthread->exited);
122 	init_completion(&kthread->parked);
123 	p->vfork_done = &kthread->exited;
124 
125 	p->worker_private = kthread;
126 	return true;
127 }
128 
129 void free_kthread_struct(struct task_struct *k)
130 {
131 	struct kthread *kthread;
132 
133 	/*
134 	 * Can be NULL if the kzalloc() in set_kthread_struct() failed.
135 	 */
136 	kthread = to_kthread(k);
137 	if (!kthread)
138 		return;
139 
140 #ifdef CONFIG_BLK_CGROUP
141 	WARN_ON_ONCE(kthread->blkcg_css);
142 #endif
143 	k->worker_private = NULL;
144 	kfree(kthread->full_name);
145 	kfree(kthread);
146 }
147 
148 /**
149  * kthread_should_stop - should this kthread return now?
150  *
151  * When someone calls kthread_stop() on your kthread, it will be woken
152  * and this will return true.  You should then return, and your return
153  * value will be passed through to kthread_stop().
154  */
155 bool kthread_should_stop(void)
156 {
157 	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
158 }
159 EXPORT_SYMBOL(kthread_should_stop);
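
/*
 * Illustrative sketch (not part of the original source): a minimal thread
 * function following the stop protocol described above.  The names
 * my_thread_fn, my_device and my_device_poll() are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct my_device *dev = data;
 *
 *		while (!kthread_should_stop()) {
 *			my_device_poll(dev);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * The value returned here is what kthread_stop() hands back to its caller.
 */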
160 
161 bool __kthread_should_park(struct task_struct *k)
162 {
163 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
164 }
165 EXPORT_SYMBOL_GPL(__kthread_should_park);
166 
167 /**
168  * kthread_should_park - should this kthread park now?
169  *
170  * When someone calls kthread_park() on your kthread, it will be woken
171  * and this will return true.  You should then do the necessary
172  * cleanup and call kthread_parkme()
173  *
174  * Similar to kthread_should_stop(), but this keeps the thread alive
175  * and in a park position. kthread_unpark() "restarts" the thread and
176  * calls the thread function again.
177  */
178 bool kthread_should_park(void)
179 {
180 	return __kthread_should_park(current);
181 }
182 EXPORT_SYMBOL_GPL(kthread_should_park);
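
/*
 * Illustrative sketch (not part of the original source): the usual park
 * protocol inside a thread function.  my_thread_fn and my_do_work() are
 * hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			my_do_work(data);
 *		}
 *		return 0;
 *	}
 *
 * Another context can then suspend and resume the thread with
 * kthread_park(task) and kthread_unpark(task) without tearing it down.
 */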
183 
184 /**
185  * kthread_freezable_should_stop - should this freezable kthread return now?
186  * @was_frozen: optional out parameter, indicates whether %current was frozen
187  *
188  * kthread_should_stop() for freezable kthreads, which will enter
189  * refrigerator if necessary.  This function is safe from kthread_stop() /
190  * freezer deadlock and freezable kthreads should use this function instead
191  * of calling try_to_freeze() directly.
192  */
193 bool kthread_freezable_should_stop(bool *was_frozen)
194 {
195 	bool frozen = false;
196 
197 	might_sleep();
198 
199 	if (unlikely(freezing(current)))
200 		frozen = __refrigerator(true);
201 
202 	if (was_frozen)
203 		*was_frozen = frozen;
204 
205 	return kthread_should_stop();
206 }
207 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
208 
209 /**
210  * kthread_func - return the function specified on kthread creation
211  * @task: kthread task in question
212  *
213  * Returns NULL if the task is not a kthread.
214  */
215 void *kthread_func(struct task_struct *task)
216 {
217 	struct kthread *kthread = __to_kthread(task);
218 	if (kthread)
219 		return kthread->threadfn;
220 	return NULL;
221 }
222 EXPORT_SYMBOL_GPL(kthread_func);
223 
224 /**
225  * kthread_data - return data value specified on kthread creation
226  * @task: kthread task in question
227  *
228  * Return the data value specified when kthread @task was created.
229  * The caller is responsible for ensuring the validity of @task when
230  * calling this function.
231  */
232 void *kthread_data(struct task_struct *task)
233 {
234 	return to_kthread(task)->data;
235 }
236 EXPORT_SYMBOL_GPL(kthread_data);
237 
238 /**
239  * kthread_probe_data - speculative version of kthread_data()
240  * @task: possible kthread task in question
241  *
242  * @task could be a kthread task.  Return the data value specified when it
243  * was created if accessible.  If @task isn't a kthread task or its data is
244  * inaccessible for any reason, %NULL is returned.  This function requires
245  * that @task itself is safe to dereference.
246  */
247 void *kthread_probe_data(struct task_struct *task)
248 {
249 	struct kthread *kthread = __to_kthread(task);
250 	void *data = NULL;
251 
252 	if (kthread)
253 		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
254 	return data;
255 }
256 
257 static void __kthread_parkme(struct kthread *self)
258 {
259 	for (;;) {
260 		/*
261 		 * TASK_PARKED is a special state; we must serialize against
262 		 * possible pending wakeups to avoid store-store collisions on
263 		 * task->state.
264 		 *
265 		 * Such a collision might possibly result in the task state
266 		 * changing from TASK_PARKED and us failing the
267 		 * wait_task_inactive() in kthread_park().
268 		 */
269 		set_special_state(TASK_PARKED);
270 		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
271 			break;
272 
273 		/*
274 		 * Thread is going to call schedule(), do not preempt it,
275 		 * or the caller of kthread_park() may spend more time in
276 		 * wait_task_inactive().
277 		 */
278 		preempt_disable();
279 		complete(&self->parked);
280 		schedule_preempt_disabled();
281 		preempt_enable();
282 	}
283 	__set_current_state(TASK_RUNNING);
284 }
285 
286 void kthread_parkme(void)
287 {
288 	__kthread_parkme(to_kthread(current));
289 }
290 EXPORT_SYMBOL_GPL(kthread_parkme);
291 
292 /**
293  * kthread_exit - Cause the current kthread to return @result to kthread_stop().
294  * @result: The integer value to return to kthread_stop().
295  *
296  * While kthread_exit() can be called directly, it exists so that
297  * functions which do some additional work in non-modular code, such as
298  * module_put_and_kthread_exit(), can be implemented.
299  *
300  * Does not return.
301  */
302 void __noreturn kthread_exit(long result)
303 {
304 	struct kthread *kthread = to_kthread(current);
305 	kthread->result = result;
306 	do_exit(0);
307 }
308 
309 /**
310  * kthread_complete_and_exit - Exit the current kthread.
311  * @comp: Completion to complete
312  * @code: The integer value to return to kthread_stop().
313  *
314  * If present, complete @comp, then return @code to kthread_stop().
315  *
316  * A kernel thread whose module may be removed after the completion of
317  * @comp can use this function to exit safely.
318  *
319  * Does not return.
320  */
321 void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
322 {
323 	if (comp)
324 		complete(comp);
325 
326 	kthread_exit(code);
327 }
328 EXPORT_SYMBOL(kthread_complete_and_exit);
329 
330 static int kthread(void *_create)
331 {
332 	static const struct sched_param param = { .sched_priority = 0 };
333 	/* Copy data: the creator may free the structure once we complete. */
334 	struct kthread_create_info *create = _create;
335 	int (*threadfn)(void *data) = create->threadfn;
336 	void *data = create->data;
337 	struct completion *done;
338 	struct kthread *self;
339 	int ret;
340 
341 	self = to_kthread(current);
342 
343 	/* Release the structure when the caller was killed by a fatal signal. */
344 	done = xchg(&create->done, NULL);
345 	if (!done) {
346 		kfree(create);
347 		kthread_exit(-EINTR);
348 	}
349 
350 	self->threadfn = threadfn;
351 	self->data = data;
352 
353 	/*
354 	 * The new thread inherited kthreadd's priority and CPU mask. Reset
355 	 * back to default in case they have been changed.
356 	 */
357 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
358 	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
359 
360 	/* OK, tell user we're spawned, wait for stop or wakeup */
361 	__set_current_state(TASK_UNINTERRUPTIBLE);
362 	create->result = current;
363 	/*
364 	 * Thread is going to call schedule(), do not preempt it,
365 	 * or the creator may spend more time in wait_task_inactive().
366 	 */
367 	preempt_disable();
368 	complete(done);
369 	schedule_preempt_disabled();
370 	preempt_enable();
371 
372 	ret = -EINTR;
373 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
374 		cgroup_kthread_ready();
375 		__kthread_parkme(self);
376 		ret = threadfn(data);
377 	}
378 	kthread_exit(ret);
379 }
380 
381 /* Called from kernel_clone() to get node information for the task about to be created. */
382 int tsk_fork_get_node(struct task_struct *tsk)
383 {
384 #ifdef CONFIG_NUMA
385 	if (tsk == kthreadd_task)
386 		return tsk->pref_node_fork;
387 #endif
388 	return NUMA_NO_NODE;
389 }
390 
391 static void create_kthread(struct kthread_create_info *create)
392 {
393 	int pid;
394 
395 #ifdef CONFIG_NUMA
396 	current->pref_node_fork = create->node;
397 #endif
398 	/* We want our own signal handler (we take no signals by default). */
399 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
400 	if (pid < 0) {
401 		/* Release the structure when the caller was killed by a fatal signal. */
402 		struct completion *done = xchg(&create->done, NULL);
403 
404 		if (!done) {
405 			kfree(create);
406 			return;
407 		}
408 		create->result = ERR_PTR(pid);
409 		complete(done);
410 	}
411 }
412 
413 static __printf(4, 0)
414 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
415 						    void *data, int node,
416 						    const char namefmt[],
417 						    va_list args)
418 {
419 	DECLARE_COMPLETION_ONSTACK(done);
420 	struct task_struct *task;
421 	struct kthread_create_info *create = kmalloc(sizeof(*create),
422 						     GFP_KERNEL);
423 
424 	if (!create)
425 		return ERR_PTR(-ENOMEM);
426 	create->threadfn = threadfn;
427 	create->data = data;
428 	create->node = node;
429 	create->done = &done;
430 
431 	spin_lock(&kthread_create_lock);
432 	list_add_tail(&create->list, &kthread_create_list);
433 	spin_unlock(&kthread_create_lock);
434 
435 	wake_up_process(kthreadd_task);
436 	/*
437 	 * Wait for completion in killable state, for I might be chosen by
438 	 * the OOM killer while kthreadd is trying to allocate memory for
439 	 * new kernel thread.
440 	 */
441 	if (unlikely(wait_for_completion_killable(&done))) {
442 		/*
443 		 * If I was killed by a fatal signal before kthreadd (or new
444 		 * kernel thread) calls complete(), leave the cleanup of this
445 		 * structure to that thread.
446 		 */
447 		if (xchg(&create->done, NULL))
448 			return ERR_PTR(-EINTR);
449 		/*
450 		 * kthreadd (or new kernel thread) will call complete()
451 		 * shortly.
452 		 */
453 		wait_for_completion(&done);
454 	}
455 	task = create->result;
456 	if (!IS_ERR(task)) {
457 		char name[TASK_COMM_LEN];
458 		va_list aq;
459 		int len;
460 
461 		/*
462 		 * task is already visible to other tasks, so updating
463 		 * COMM must be protected.
464 		 */
465 		va_copy(aq, args);
466 		len = vsnprintf(name, sizeof(name), namefmt, aq);
467 		va_end(aq);
468 		if (len >= TASK_COMM_LEN) {
469 			struct kthread *kthread = to_kthread(task);
470 
471 			/* leave it truncated when out of memory. */
472 			kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
473 		}
474 		set_task_comm(task, name);
475 	}
476 	kfree(create);
477 	return task;
478 }
479 
480 /**
481  * kthread_create_on_node - create a kthread.
482  * @threadfn: the function to run until signal_pending(current).
483  * @data: data ptr for @threadfn.
484  * @node: task and thread structures for the thread are allocated on this node
485  * @namefmt: printf-style name for the thread.
486  *
487  * Description: This helper function creates and names a kernel
488  * thread.  The thread will be stopped: use wake_up_process() to start
489  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
490  * is affine to all CPUs.
491  *
492  * If the thread is going to be bound to a particular CPU, give its node
493  * in @node to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
494  * When woken, the thread will run @threadfn() with @data as its
495  * argument. @threadfn() can either return directly if it is a
496  * standalone thread for which no one will call kthread_stop(), or
497  * return when 'kthread_should_stop()' is true (which means
498  * kthread_stop() has been called).  The return value should be zero
499  * or a negative error number; it will be passed to kthread_stop().
500  *
501  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
502  */
503 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
504 					   void *data, int node,
505 					   const char namefmt[],
506 					   ...)
507 {
508 	struct task_struct *task;
509 	va_list args;
510 
511 	va_start(args, namefmt);
512 	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
513 	va_end(args);
514 
515 	return task;
516 }
517 EXPORT_SYMBOL(kthread_create_on_node);
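
/*
 * Illustrative sketch (not part of the original source): creating, starting
 * and stopping a thread with this interface.  my_thread_fn, my_data and the
 * "my_thread/%d" name (with a hypothetical id) are made up for the example.
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				      "my_thread/%d", id);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	wake_up_process(task);
 *	...
 *	ret = kthread_stop(task);
 *
 * kthread_create() and kthread_run() in <linux/kthread.h> are convenience
 * wrappers around this function; kthread_run() also does the wake_up_process().
 */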
518 
519 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
520 {
521 	unsigned long flags;
522 
523 	if (!wait_task_inactive(p, state)) {
524 		WARN_ON(1);
525 		return;
526 	}
527 
528 	/* It's safe because the task is inactive. */
529 	raw_spin_lock_irqsave(&p->pi_lock, flags);
530 	do_set_cpus_allowed(p, mask);
531 	p->flags |= PF_NO_SETAFFINITY;
532 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
533 }
534 
535 static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
536 {
537 	__kthread_bind_mask(p, cpumask_of(cpu), state);
538 }
539 
540 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
541 {
542 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
543 }
544 
545 /**
546  * kthread_bind - bind a just-created kthread to a cpu.
547  * @p: thread created by kthread_create().
548  * @cpu: cpu (might not be online, must be possible) for @p to run on.
549  *
550  * Description: This function is equivalent to set_cpus_allowed(),
551  * except that @cpu doesn't need to be online, and the thread must be
552  * stopped (i.e., just returned from kthread_create()).
553  */
554 void kthread_bind(struct task_struct *p, unsigned int cpu)
555 {
556 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
557 }
558 EXPORT_SYMBOL(kthread_bind);
559 
560 /**
561  * kthread_create_on_cpu - Create a cpu bound kthread
562  * @threadfn: the function to run until signal_pending(current).
563  * @data: data ptr for @threadfn.
564  * @cpu: The cpu on which the thread should be bound.
565  * @namefmt: printf-style name for the thread. Format is restricted
566  *	     to "name.*%u". Code fills in cpu number.
567  *
568  * Description: This helper function creates and names a kernel thread bound to @cpu.
569  */
570 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
571 					  void *data, unsigned int cpu,
572 					  const char *namefmt)
573 {
574 	struct task_struct *p;
575 
576 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
577 				   cpu);
578 	if (IS_ERR(p))
579 		return p;
580 	kthread_bind(p, cpu);
581 	/* CPU hotplug needs to bind the thread once again when unparking it. */
582 	to_kthread(p)->cpu = cpu;
583 	return p;
584 }
585 EXPORT_SYMBOL(kthread_create_on_cpu);
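
/*
 * Illustrative sketch (not part of the original source): a CPU-bound thread
 * created with this helper.  my_cpu_fn and my_data are hypothetical; note
 * that the name format must contain "%u", which is filled with the CPU number.
 *
 *	struct task_struct *p;
 *
 *	p = kthread_create_on_cpu(my_cpu_fn, my_data, cpu, "my_worker/%u");
 *	if (!IS_ERR(p))
 *		wake_up_process(p);
 *
 * Long-lived per-CPU threads that must survive CPU hotplug are usually better
 * served by the smpboot_register_percpu_thread() infrastructure.
 */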
586 
587 void kthread_set_per_cpu(struct task_struct *k, int cpu)
588 {
589 	struct kthread *kthread = to_kthread(k);
590 	if (!kthread)
591 		return;
592 
593 	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
594 
595 	if (cpu < 0) {
596 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
597 		return;
598 	}
599 
600 	kthread->cpu = cpu;
601 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
602 }
603 
604 bool kthread_is_per_cpu(struct task_struct *p)
605 {
606 	struct kthread *kthread = __to_kthread(p);
607 	if (!kthread)
608 		return false;
609 
610 	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
611 }
612 
613 /**
614  * kthread_unpark - unpark a thread created by kthread_create().
615  * @k:		thread created by kthread_create().
616  *
617  * Sets kthread_should_park() for @k to return false and wakes it up.
618  * If the thread is marked percpu then it is
619  * bound to the cpu again.
620  */
621 void kthread_unpark(struct task_struct *k)
622 {
623 	struct kthread *kthread = to_kthread(k);
624 
625 	/*
626 	 * Newly created kthread was parked when the CPU was offline.
627 	 * The binding was lost and we need to set it again.
628 	 */
629 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
630 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
631 
632 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
633 	/*
634 	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
635 	 */
636 	wake_up_state(k, TASK_PARKED);
637 }
638 EXPORT_SYMBOL_GPL(kthread_unpark);
639 
640 /**
641  * kthread_park - park a thread created by kthread_create().
642  * @k: thread created by kthread_create().
643  *
644  * Sets kthread_should_park() for @k to return true, wakes it, and
645  * waits for it to return. This can also be called after kthread_create()
646  * instead of calling wake_up_process(): the thread will park without
647  * calling threadfn().
648  *
649  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
650  * If called by the kthread itself, just the park bit is set.
651  */
652 int kthread_park(struct task_struct *k)
653 {
654 	struct kthread *kthread = to_kthread(k);
655 
656 	if (WARN_ON(k->flags & PF_EXITING))
657 		return -ENOSYS;
658 
659 	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
660 		return -EBUSY;
661 
662 	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
663 	if (k != current) {
664 		wake_up_process(k);
665 		/*
666 		 * Wait for __kthread_parkme() to complete(), this means we
667 		 * _will_ have TASK_PARKED and are about to call schedule().
668 		 */
669 		wait_for_completion(&kthread->parked);
670 		/*
671 		 * Now wait for that schedule() to complete and the task to
672 		 * get scheduled out.
673 		 */
674 		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
675 	}
676 
677 	return 0;
678 }
679 EXPORT_SYMBOL_GPL(kthread_park);
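
/*
 * Illustrative sketch (not part of the original source): pausing a thread
 * from another context around a reconfiguration it must not observe.
 * my_reconfigure() is hypothetical; task was created by kthread_create().
 *
 *	if (!kthread_park(task)) {
 *		my_reconfigure();
 *		kthread_unpark(task);
 *	}
 */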
680 
681 /**
682  * kthread_stop - stop a thread created by kthread_create().
683  * @k: thread created by kthread_create().
684  *
685  * Sets kthread_should_stop() for @k to return true, wakes it, and
686  * waits for it to exit. This can also be called after kthread_create()
687  * instead of calling wake_up_process(): the thread will exit without
688  * calling threadfn().
689  *
690  * If threadfn() may call kthread_exit() itself, the caller must ensure
691  * task_struct can't go away.
692  *
693  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
694  * was never called.
695  */
696 int kthread_stop(struct task_struct *k)
697 {
698 	struct kthread *kthread;
699 	int ret;
700 
701 	trace_sched_kthread_stop(k);
702 
703 	get_task_struct(k);
704 	kthread = to_kthread(k);
705 	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
706 	kthread_unpark(k);
707 	wake_up_process(k);
708 	wait_for_completion(&kthread->exited);
709 	ret = kthread->result;
710 	put_task_struct(k);
711 
712 	trace_sched_kthread_stop_ret(ret);
713 	return ret;
714 }
715 EXPORT_SYMBOL(kthread_stop);
716 
717 int kthreadd(void *unused)
718 {
719 	struct task_struct *tsk = current;
720 
721 	/* Set up a clean context for our children to inherit. */
722 	set_task_comm(tsk, "kthreadd");
723 	ignore_signals(tsk);
724 	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
725 	set_mems_allowed(node_states[N_MEMORY]);
726 
727 	current->flags |= PF_NOFREEZE;
728 	cgroup_init_kthreadd();
729 
730 	for (;;) {
731 		set_current_state(TASK_INTERRUPTIBLE);
732 		if (list_empty(&kthread_create_list))
733 			schedule();
734 		__set_current_state(TASK_RUNNING);
735 
736 		spin_lock(&kthread_create_lock);
737 		while (!list_empty(&kthread_create_list)) {
738 			struct kthread_create_info *create;
739 
740 			create = list_entry(kthread_create_list.next,
741 					    struct kthread_create_info, list);
742 			list_del_init(&create->list);
743 			spin_unlock(&kthread_create_lock);
744 
745 			create_kthread(create);
746 
747 			spin_lock(&kthread_create_lock);
748 		}
749 		spin_unlock(&kthread_create_lock);
750 	}
751 
752 	return 0;
753 }
754 
755 void __kthread_init_worker(struct kthread_worker *worker,
756 				const char *name,
757 				struct lock_class_key *key)
758 {
759 	memset(worker, 0, sizeof(struct kthread_worker));
760 	raw_spin_lock_init(&worker->lock);
761 	lockdep_set_class_and_name(&worker->lock, key, name);
762 	INIT_LIST_HEAD(&worker->work_list);
763 	INIT_LIST_HEAD(&worker->delayed_work_list);
764 }
765 EXPORT_SYMBOL_GPL(__kthread_init_worker);
766 
767 /**
768  * kthread_worker_fn - kthread function to process kthread_worker
769  * @worker_ptr: pointer to initialized kthread_worker
770  *
771  * This function implements the main loop of a kthread worker. It processes
772  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
773  * is empty.
774  *
775  * Works must not hold any locks or leave preemption or interrupts disabled
776  * when they finish. A safe point for freezing is provided after one work
777  * finishes and before the next one is started.
778  *
779  * Also, a work must not be handled by more than one worker at the same time;
780  * see also kthread_queue_work().
781  */
782 int kthread_worker_fn(void *worker_ptr)
783 {
784 	struct kthread_worker *worker = worker_ptr;
785 	struct kthread_work *work;
786 
787 	/*
788 	 * FIXME: Update the check and remove the assignment when all kthread
789 	 * worker users are created using kthread_create_worker*() functions.
790 	 */
791 	WARN_ON(worker->task && worker->task != current);
792 	worker->task = current;
793 
794 	if (worker->flags & KTW_FREEZABLE)
795 		set_freezable();
796 
797 repeat:
798 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
799 
800 	if (kthread_should_stop()) {
801 		__set_current_state(TASK_RUNNING);
802 		raw_spin_lock_irq(&worker->lock);
803 		worker->task = NULL;
804 		raw_spin_unlock_irq(&worker->lock);
805 		return 0;
806 	}
807 
808 	work = NULL;
809 	raw_spin_lock_irq(&worker->lock);
810 	if (!list_empty(&worker->work_list)) {
811 		work = list_first_entry(&worker->work_list,
812 					struct kthread_work, node);
813 		list_del_init(&work->node);
814 	}
815 	worker->current_work = work;
816 	raw_spin_unlock_irq(&worker->lock);
817 
818 	if (work) {
819 		kthread_work_func_t func = work->func;
820 		__set_current_state(TASK_RUNNING);
821 		trace_sched_kthread_work_execute_start(work);
822 		work->func(work);
823 		/*
824 		 * Avoid dereferencing work after this point.  The trace
825 		 * event only cares about the address.
826 		 */
827 		trace_sched_kthread_work_execute_end(work, func);
828 	} else if (!freezing(current))
829 		schedule();
830 
831 	try_to_freeze();
832 	cond_resched();
833 	goto repeat;
834 }
835 EXPORT_SYMBOL_GPL(kthread_worker_fn);
836 
837 static __printf(3, 0) struct kthread_worker *
838 __kthread_create_worker(int cpu, unsigned int flags,
839 			const char namefmt[], va_list args)
840 {
841 	struct kthread_worker *worker;
842 	struct task_struct *task;
843 	int node = NUMA_NO_NODE;
844 
845 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
846 	if (!worker)
847 		return ERR_PTR(-ENOMEM);
848 
849 	kthread_init_worker(worker);
850 
851 	if (cpu >= 0)
852 		node = cpu_to_node(cpu);
853 
854 	task = __kthread_create_on_node(kthread_worker_fn, worker,
855 						node, namefmt, args);
856 	if (IS_ERR(task))
857 		goto fail_task;
858 
859 	if (cpu >= 0)
860 		kthread_bind(task, cpu);
861 
862 	worker->flags = flags;
863 	worker->task = task;
864 	wake_up_process(task);
865 	return worker;
866 
867 fail_task:
868 	kfree(worker);
869 	return ERR_CAST(task);
870 }
871 
872 /**
873  * kthread_create_worker - create a kthread worker
874  * @flags: flags modifying the default behavior of the worker
875  * @namefmt: printf-style name for the kthread worker (task).
876  *
877  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
878  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
879  * when the caller was killed by a fatal signal.
880  */
881 struct kthread_worker *
882 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
883 {
884 	struct kthread_worker *worker;
885 	va_list args;
886 
887 	va_start(args, namefmt);
888 	worker = __kthread_create_worker(-1, flags, namefmt, args);
889 	va_end(args);
890 
891 	return worker;
892 }
893 EXPORT_SYMBOL(kthread_create_worker);
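
/*
 * Illustrative sketch (not part of the original source): the typical life
 * cycle of a worker and one work item.  my_work_fn and the "my_worker" name
 * are hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		... runs in the worker's thread context ...
 *	}
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work work;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *
 *	kthread_init_work(&work, my_work_fn);
 *	kthread_queue_work(worker, &work);
 *	kthread_flush_work(&work);
 *	kthread_destroy_worker(worker);
 */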
894 
895 /**
896  * kthread_create_worker_on_cpu - create a kthread worker and bind it
897  *	to a given CPU and the associated NUMA node.
898  * @cpu: CPU number
899  * @flags: flags modifying the default behavior of the worker
900  * @namefmt: printf-style name for the kthread worker (task).
901  *
902  * Use a valid CPU number if you want to bind the kthread worker
903  * to the given CPU and the associated NUMA node.
904  *
905  * It is good practice to also include the cpu number in the worker name.
906  * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
907  *
908  * CPU hotplug:
909  * The kthread worker API is simple and generic. It just provides a way
910  * to create, use, and destroy workers.
911  *
912  * It is up to the API user how to handle CPU hotplug. They have to decide
913  * how to handle pending work items, prevent queuing new ones, and
914  * restore the functionality when the CPU goes offline and comes back
915  * online. There are a few catches:
916  *
917  *    - CPU affinity is lost when the CPU goes offline and the worker is migrated.
918  *
919  *    - The worker might not exist if the CPU was offline when the user
920  *      created the workers.
921  *
922  * Good practice is to implement two CPU hotplug callbacks and to
923  * destroy/create the worker when the CPU goes down/up.
924  *
925  * Return:
926  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
927  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
928  * when the caller was killed by a fatal signal.
929  */
930 struct kthread_worker *
931 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
932 			     const char namefmt[], ...)
933 {
934 	struct kthread_worker *worker;
935 	va_list args;
936 
937 	va_start(args, namefmt);
938 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
939 	va_end(args);
940 
941 	return worker;
942 }
943 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
944 
945 /*
946  * Returns true when the work could not be queued at the moment.
947  * This happens when the work is already pending in a worker list
948  * or when it is being cancelled.
949  */
950 static inline bool queuing_blocked(struct kthread_worker *worker,
951 				   struct kthread_work *work)
952 {
953 	lockdep_assert_held(&worker->lock);
954 
955 	return !list_empty(&work->node) || work->canceling;
956 }
957 
958 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
959 					     struct kthread_work *work)
960 {
961 	lockdep_assert_held(&worker->lock);
962 	WARN_ON_ONCE(!list_empty(&work->node));
963 	/* Do not use a work with >1 worker, see kthread_queue_work() */
964 	WARN_ON_ONCE(work->worker && work->worker != worker);
965 }
966 
967 /* insert @work before @pos in @worker */
968 static void kthread_insert_work(struct kthread_worker *worker,
969 				struct kthread_work *work,
970 				struct list_head *pos)
971 {
972 	kthread_insert_work_sanity_check(worker, work);
973 
974 	trace_sched_kthread_work_queue_work(worker, work);
975 
976 	list_add_tail(&work->node, pos);
977 	work->worker = worker;
978 	if (!worker->current_work && likely(worker->task))
979 		wake_up_process(worker->task);
980 }
981 
982 /**
983  * kthread_queue_work - queue a kthread_work
984  * @worker: target kthread_worker
985  * @work: kthread_work to queue
986  *
987  * Queue @work for async execution on @worker.  @worker
988  * must have been created with kthread_create_worker().  Returns %true
989  * if @work was successfully queued, %false if it was already pending.
990  *
991  * Reinitialize the work if it needs to be used by another worker.
992  * For example, when the worker was stopped and started again.
993  */
994 bool kthread_queue_work(struct kthread_worker *worker,
995 			struct kthread_work *work)
996 {
997 	bool ret = false;
998 	unsigned long flags;
999 
1000 	raw_spin_lock_irqsave(&worker->lock, flags);
1001 	if (!queuing_blocked(worker, work)) {
1002 		kthread_insert_work(worker, work, &worker->work_list);
1003 		ret = true;
1004 	}
1005 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1006 	return ret;
1007 }
1008 EXPORT_SYMBOL_GPL(kthread_queue_work);
1009 
1010 /**
1011  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1012  *	delayed work when the timer expires.
1013  * @t: pointer to the expired timer
1014  *
1015  * The prototype of the function is defined by struct timer_list.
1016  * It is called from an irq-safe timer with interrupts already disabled.
1017  */
1018 void kthread_delayed_work_timer_fn(struct timer_list *t)
1019 {
1020 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1021 	struct kthread_work *work = &dwork->work;
1022 	struct kthread_worker *worker = work->worker;
1023 	unsigned long flags;
1024 
1025 	/*
1026 	 * This might happen when a pending work is reinitialized.
1027 	 * It means that it is used a wrong way.
1028 	 * It means that the work is being used in a wrong way.
1029 	if (WARN_ON_ONCE(!worker))
1030 		return;
1031 
1032 	raw_spin_lock_irqsave(&worker->lock, flags);
1033 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1034 	WARN_ON_ONCE(work->worker != worker);
1035 
1036 	/* Move the work from worker->delayed_work_list. */
1037 	WARN_ON_ONCE(list_empty(&work->node));
1038 	list_del_init(&work->node);
1039 	if (!work->canceling)
1040 		kthread_insert_work(worker, work, &worker->work_list);
1041 
1042 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1043 }
1044 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1045 
1046 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1047 					 struct kthread_delayed_work *dwork,
1048 					 unsigned long delay)
1049 {
1050 	struct timer_list *timer = &dwork->timer;
1051 	struct kthread_work *work = &dwork->work;
1052 
1053 	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1054 
1055 	/*
1056 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1057 	 * both optimization and correctness.  The earliest @timer can
1058 	 * expire is on the closest next tick and delayed_work users depend
1059 	 * on that there's no such delay when @delay is 0.
1060 	 */
1061 	if (!delay) {
1062 		kthread_insert_work(worker, work, &worker->work_list);
1063 		return;
1064 	}
1065 
1066 	/* Be paranoid and try to detect possible races right away. */
1067 	kthread_insert_work_sanity_check(worker, work);
1068 
1069 	list_add(&work->node, &worker->delayed_work_list);
1070 	work->worker = worker;
1071 	timer->expires = jiffies + delay;
1072 	add_timer(timer);
1073 }
1074 
1075 /**
1076  * kthread_queue_delayed_work - queue the associated kthread work
1077  *	after a delay.
1078  * @worker: target kthread_worker
1079  * @dwork: kthread_delayed_work to queue
1080  * @delay: number of jiffies to wait before queuing
1081  *
1082  * If the work is not already pending, it starts a timer that will queue
1083  * the work after the given @delay. If @delay is zero, it queues the
1084  * work immediately.
1085  *
1086  * Return: %false if @work was already pending, meaning that either its
1087  * timer was running or the work was already queued. Returns %true
1088  * otherwise.
1089  */
1090 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1091 				struct kthread_delayed_work *dwork,
1092 				unsigned long delay)
1093 {
1094 	struct kthread_work *work = &dwork->work;
1095 	unsigned long flags;
1096 	bool ret = false;
1097 
1098 	raw_spin_lock_irqsave(&worker->lock, flags);
1099 
1100 	if (!queuing_blocked(worker, work)) {
1101 		__kthread_queue_delayed_work(worker, dwork, delay);
1102 		ret = true;
1103 	}
1104 
1105 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1106 	return ret;
1107 }
1108 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
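
/*
 * Illustrative sketch (not part of the original source): arming, re-arming
 * and canceling a delayed work on an existing worker.  my_timeout_fn and
 * worker are assumed to exist for the example.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
 *	...
 *	kthread_mod_delayed_work(worker, &dwork, msecs_to_jiffies(500));
 *	...
 *	kthread_cancel_delayed_work_sync(&dwork);
 */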
1109 
1110 struct kthread_flush_work {
1111 	struct kthread_work	work;
1112 	struct completion	done;
1113 };
1114 
1115 static void kthread_flush_work_fn(struct kthread_work *work)
1116 {
1117 	struct kthread_flush_work *fwork =
1118 		container_of(work, struct kthread_flush_work, work);
1119 	complete(&fwork->done);
1120 }
1121 
1122 /**
1123  * kthread_flush_work - flush a kthread_work
1124  * @work: work to flush
1125  *
1126  * If @work is queued or executing, wait for it to finish execution.
1127  */
1128 void kthread_flush_work(struct kthread_work *work)
1129 {
1130 	struct kthread_flush_work fwork = {
1131 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1132 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1133 	};
1134 	struct kthread_worker *worker;
1135 	bool noop = false;
1136 
1137 	worker = work->worker;
1138 	if (!worker)
1139 		return;
1140 
1141 	raw_spin_lock_irq(&worker->lock);
1142 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1143 	WARN_ON_ONCE(work->worker != worker);
1144 
1145 	if (!list_empty(&work->node))
1146 		kthread_insert_work(worker, &fwork.work, work->node.next);
1147 	else if (worker->current_work == work)
1148 		kthread_insert_work(worker, &fwork.work,
1149 				    worker->work_list.next);
1150 	else
1151 		noop = true;
1152 
1153 	raw_spin_unlock_irq(&worker->lock);
1154 
1155 	if (!noop)
1156 		wait_for_completion(&fwork.done);
1157 }
1158 EXPORT_SYMBOL_GPL(kthread_flush_work);
1159 
1160 /*
1161  * Make sure that the timer is neither set nor running and could
1162  * not manipulate the work list_head any longer.
1163  *
1164  * The function is called under worker->lock. The lock is temporarily
1165  * released but the timer can't be set again in the meantime.
1166  */
1167 static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1168 					      unsigned long *flags)
1169 {
1170 	struct kthread_delayed_work *dwork =
1171 		container_of(work, struct kthread_delayed_work, work);
1172 	struct kthread_worker *worker = work->worker;
1173 
1174 	/*
1175 	 * del_timer_sync() must be called to make sure that the timer
1176 	 * callback is not running. The lock must be temporarily released
1177 	 * to avoid a deadlock with the callback. In the meantime,
1178 	 * any queuing is blocked by setting the canceling counter.
1179 	 */
1180 	work->canceling++;
1181 	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1182 	del_timer_sync(&dwork->timer);
1183 	raw_spin_lock_irqsave(&worker->lock, *flags);
1184 	work->canceling--;
1185 }
1186 
1187 /*
1188  * This function removes the work from the worker queue.
1189  *
1190  * It is called under worker->lock. The caller must make sure that
1191  * the timer used by delayed work is not running, e.g. by calling
1192  * kthread_cancel_delayed_work_timer().
1193  *
1194  * The work might still be in use when this function finishes. See
1195  * worker->current_work, which tracks the work currently being processed.
1196  *
1197  * Return: %true if @work was pending and successfully canceled,
1198  *	%false if @work was not pending
1199  */
1200 static bool __kthread_cancel_work(struct kthread_work *work)
1201 {
1202 	/*
1203 	 * Try to remove the work from a worker list. It might either
1204 	 * be from worker->work_list or from worker->delayed_work_list.
1205 	 */
1206 	if (!list_empty(&work->node)) {
1207 		list_del_init(&work->node);
1208 		return true;
1209 	}
1210 
1211 	return false;
1212 }
1213 
1214 /**
1215  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1216  * @worker: kthread worker to use
1217  * @dwork: kthread delayed work to queue
1218  * @delay: number of jiffies to wait before queuing
1219  *
1220  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1221  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1222  * @work is guaranteed to be queued immediately.
1223  *
1224  * Return: %false if @dwork was idle and queued, %true otherwise.
1225  *
1226  * A special case is when the work is being canceled in parallel.
1227  * It might be caused either by the real kthread_cancel_delayed_work_sync()
1228  * or yet another kthread_mod_delayed_work() call. We let the other command
1229  * win and return %true here. The return value can be used for reference
1230  * counting and the number of queued works stays the same. Anyway, the caller
1231  * is supposed to synchronize these operations in a reasonable way.
1232  *
1233  * This function is safe to call from any context including IRQ handler.
1234  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1235  * for details.
1236  */
1237 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1238 			      struct kthread_delayed_work *dwork,
1239 			      unsigned long delay)
1240 {
1241 	struct kthread_work *work = &dwork->work;
1242 	unsigned long flags;
1243 	int ret;
1244 
1245 	raw_spin_lock_irqsave(&worker->lock, flags);
1246 
1247 	/* Do not bother with canceling when never queued. */
1248 	if (!work->worker) {
1249 		ret = false;
1250 		goto fast_queue;
1251 	}
1252 
1253 	/* Work must not be used with >1 worker, see kthread_queue_work() */
1254 	WARN_ON_ONCE(work->worker != worker);
1255 
1256 	/*
1257 	 * Temporarily cancel the work but do not fight with another command
1258 	 * that is canceling the work as well.
1259 	 *
1260 	 * It is a bit tricky because of possible races with another
1261 	 * mod_delayed_work() and cancel_delayed_work() callers.
1262 	 *
1263 	 * The timer must be canceled first because worker->lock is released
1264 	 * when doing so. But the work can be removed from the queue (list)
1265 	 * only when it can be queued again so that the return value can
1266 	 * be used for reference counting.
1267 	 */
1268 	kthread_cancel_delayed_work_timer(work, &flags);
1269 	if (work->canceling) {
1270 		/* The number of works in the queue does not change. */
1271 		ret = true;
1272 		goto out;
1273 	}
1274 	ret = __kthread_cancel_work(work);
1275 
1276 fast_queue:
1277 	__kthread_queue_delayed_work(worker, dwork, delay);
1278 out:
1279 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1280 	return ret;
1281 }
1282 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
1283 
1284 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1285 {
1286 	struct kthread_worker *worker = work->worker;
1287 	unsigned long flags;
1288 	int ret = false;
1289 
1290 	if (!worker)
1291 		goto out;
1292 
1293 	raw_spin_lock_irqsave(&worker->lock, flags);
1294 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1295 	WARN_ON_ONCE(work->worker != worker);
1296 
1297 	if (is_dwork)
1298 		kthread_cancel_delayed_work_timer(work, &flags);
1299 
1300 	ret = __kthread_cancel_work(work);
1301 
1302 	if (worker->current_work != work)
1303 		goto out_fast;
1304 
1305 	/*
1306 	 * The work is in progress and we need to wait with the lock released.
1307 	 * In the meantime, block any queuing by setting the canceling counter.
1308 	 */
1309 	work->canceling++;
1310 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1311 	kthread_flush_work(work);
1312 	raw_spin_lock_irqsave(&worker->lock, flags);
1313 	work->canceling--;
1314 
1315 out_fast:
1316 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1317 out:
1318 	return ret;
1319 }
1320 
1321 /**
1322  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1323  * @work: the kthread work to cancel
1324  *
1325  * Cancel @work and wait for its execution to finish.  This function
1326  * can be used even if the work re-queues itself. On return from this
1327  * function, @work is guaranteed to be not pending or executing on any CPU.
1328  *
1329  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1330  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1331  *
1332  * The caller must ensure that the worker on which @work was last
1333  * queued can't be destroyed before this function returns.
1334  *
1335  * Return: %true if @work was pending, %false otherwise.
1336  */
1337 bool kthread_cancel_work_sync(struct kthread_work *work)
1338 {
1339 	return __kthread_cancel_work_sync(work, false);
1340 }
1341 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
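
/*
 * Illustrative sketch (not part of the original source): the usual teardown
 * ordering, assuming a hypothetical my_dev object that embeds a kthread_work.
 * After the call returns the work is neither pending nor running, so the
 * containing object may be freed.
 *
 *	kthread_cancel_work_sync(&my_dev->work);
 *	kfree(my_dev);
 *
 * The worker the work was queued on must still exist at this point; destroy
 * it only afterwards.
 */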
1342 
1343 /**
1344  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1345  *	wait for it to finish.
1346  * @dwork: the kthread delayed work to cancel
1347  *
1348  * This is kthread_cancel_work_sync() for delayed works.
1349  *
1350  * Return: %true if @dwork was pending, %false otherwise.
1351  */
1352 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1353 {
1354 	return __kthread_cancel_work_sync(&dwork->work, true);
1355 }
1356 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1357 
1358 /**
1359  * kthread_flush_worker - flush all current works on a kthread_worker
1360  * @worker: worker to flush
1361  *
1362  * Wait until all currently executing or pending works on @worker are
1363  * finished.
1364  */
1365 void kthread_flush_worker(struct kthread_worker *worker)
1366 {
1367 	struct kthread_flush_work fwork = {
1368 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1369 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1370 	};
1371 
1372 	kthread_queue_work(worker, &fwork.work);
1373 	wait_for_completion(&fwork.done);
1374 }
1375 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1376 
1377 /**
1378  * kthread_destroy_worker - destroy a kthread worker
1379  * @worker: worker to be destroyed
1380  *
1381  * Flush and destroy @worker.  The simple flush is enough because the kthread
1382  * worker API is used only in trivial scenarios.  There are no multi-step state
1383  * machines needed.
1384  */
1385 void kthread_destroy_worker(struct kthread_worker *worker)
1386 {
1387 	struct task_struct *task;
1388 
1389 	task = worker->task;
1390 	if (WARN_ON(!task))
1391 		return;
1392 
1393 	kthread_flush_worker(worker);
1394 	kthread_stop(task);
1395 	WARN_ON(!list_empty(&worker->work_list));
1396 	kfree(worker);
1397 }
1398 EXPORT_SYMBOL(kthread_destroy_worker);
1399 
1400 /**
1401  * kthread_use_mm - make the calling kthread operate on an address space
1402  * @mm: address space to operate on
1403  */
1404 void kthread_use_mm(struct mm_struct *mm)
1405 {
1406 	struct mm_struct *active_mm;
1407 	struct task_struct *tsk = current;
1408 
1409 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1410 	WARN_ON_ONCE(tsk->mm);
1411 
1412 	task_lock(tsk);
1413 	/* Hold off tlb flush IPIs while switching mm's */
1414 	local_irq_disable();
1415 	active_mm = tsk->active_mm;
1416 	if (active_mm != mm) {
1417 		mmgrab(mm);
1418 		tsk->active_mm = mm;
1419 	}
1420 	tsk->mm = mm;
1421 	membarrier_update_current_mm(mm);
1422 	switch_mm_irqs_off(active_mm, mm, tsk);
1423 	local_irq_enable();
1424 	task_unlock(tsk);
1425 #ifdef finish_arch_post_lock_switch
1426 	finish_arch_post_lock_switch();
1427 #endif
1428 
1429 	/*
1430 	 * When a kthread starts operating on an address space, the loop
1431 	 * in membarrier_{private,global}_expedited() may not observe the
1432 	 * update to tsk->mm and thus not issue an IPI. Membarrier requires a
1433 	 * memory barrier after storing to tsk->mm, before accessing
1434 	 * user-space memory. A full memory barrier for membarrier
1435 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1436 	 * mmdrop(), or explicitly with smp_mb().
1437 	 */
1438 	if (active_mm != mm)
1439 		mmdrop(active_mm);
1440 	else
1441 		smp_mb();
1442 }
1443 EXPORT_SYMBOL_GPL(kthread_use_mm);
1444 
1445 /**
1446  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1447  * @mm: address space to operate on
1448  */
1449 void kthread_unuse_mm(struct mm_struct *mm)
1450 {
1451 	struct task_struct *tsk = current;
1452 
1453 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1454 	WARN_ON_ONCE(!tsk->mm);
1455 
1456 	task_lock(tsk);
1457 	/*
1458 	 * When a kthread stops operating on an address space, the loop
1459 	 * in membarrier_{private,global}_expedited() may not observe the
1460 	 * update to tsk->mm and thus not issue an IPI. Membarrier requires a
1461 	 * memory barrier after accessing user-space memory, before
1462 	 * clearing tsk->mm.
1463 	 */
1464 	smp_mb__after_spinlock();
1465 	sync_mm_rss(mm);
1466 	local_irq_disable();
1467 	tsk->mm = NULL;
1468 	membarrier_update_current_mm(NULL);
1469 	/* active_mm is still 'mm' */
1470 	enter_lazy_tlb(mm, tsk);
1471 	local_irq_enable();
1472 	task_unlock(tsk);
1473 }
1474 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
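
/*
 * Illustrative sketch (not part of the original source): a kthread temporarily
 * adopting a user address space so that copy_{from,to}_user() operate on it.
 * Here mm is assumed to be a struct mm_struct the caller already holds a
 * reference on (e.g. from get_task_mm(), released later with mmput());
 * req, user_ptr and ret are local variables of the hypothetical caller.
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(&req, user_ptr, sizeof(req)))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */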
1475 
1476 #ifdef CONFIG_BLK_CGROUP
1477 /**
1478  * kthread_associate_blkcg - associate blkcg to current kthread
1479  * @css: the cgroup info
1480  *
1481  * The current thread must be a kthread. The thread runs jobs on behalf of
1482  * other threads. In some cases, we expect the jobs to be attributed to the
1483  * cgroup of the original threads instead of that of the current thread. This
1484  * function stores the original thread's cgroup info in the current kthread's
1485  * context for later retrieval.
1486  */
1487 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1488 {
1489 	struct kthread *kthread;
1490 
1491 	if (!(current->flags & PF_KTHREAD))
1492 		return;
1493 	kthread = to_kthread(current);
1494 	if (!kthread)
1495 		return;
1496 
1497 	if (kthread->blkcg_css) {
1498 		css_put(kthread->blkcg_css);
1499 		kthread->blkcg_css = NULL;
1500 	}
1501 	if (css) {
1502 		css_get(css);
1503 		kthread->blkcg_css = css;
1504 	}
1505 }
1506 EXPORT_SYMBOL(kthread_associate_blkcg);
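
/*
 * Illustrative sketch (not part of the original source): a kthread doing I/O
 * on behalf of another task can associate that task's blkcg around the
 * submission so the I/O is accounted to the right cgroup.  orig_css is a
 * hypothetical css saved from the originating task.
 *
 *	kthread_associate_blkcg(orig_css);
 *	... submit the bios ...
 *	kthread_associate_blkcg(NULL);
 */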
1507 
1508 /**
1509  * kthread_blkcg - get associated blkcg css of current kthread
1510  *
1511  * Current thread must be a kthread.
1512  */
1513 struct cgroup_subsys_state *kthread_blkcg(void)
1514 {
1515 	struct kthread *kthread;
1516 
1517 	if (current->flags & PF_KTHREAD) {
1518 		kthread = to_kthread(current);
1519 		if (kthread)
1520 			return kthread->blkcg_css;
1521 	}
1522 	return NULL;
1523 }
1524 #endif
1525