xref: /openbmc/linux/kernel/kthread.c (revision d6986ce2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
4  *   Copyright (C) 2009 Red Hat, Inc.
5  *
6  * Creation is done via kthreadd, so that we get a clean environment
7  * even if we're invoked from userspace (think modprobe, hotplug cpu,
8  * etc.).
9  */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32 
33 
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37 
38 struct kthread_create_info
39 {
40 	/* Information passed to kthread() from kthreadd. */
41 	int (*threadfn)(void *data);
42 	void *data;
43 	int node;
44 
45 	/* Result passed back to kthread_create() from kthreadd. */
46 	struct task_struct *result;
47 	struct completion *done;
48 
49 	struct list_head list;
50 };
51 
52 struct kthread {
53 	unsigned long flags;
54 	unsigned int cpu;
55 	int (*threadfn)(void *);
56 	void *data;
57 	mm_segment_t oldfs;
58 	struct completion parked;
59 	struct completion exited;
60 #ifdef CONFIG_BLK_CGROUP
61 	struct cgroup_subsys_state *blkcg_css;
62 #endif
63 	/* To store the full name if task comm is truncated. */
64 	char *full_name;
65 };
66 
67 enum KTHREAD_BITS {
68 	KTHREAD_IS_PER_CPU = 0,
69 	KTHREAD_SHOULD_STOP,
70 	KTHREAD_SHOULD_PARK,
71 };
72 
73 static inline struct kthread *to_kthread(struct task_struct *k)
74 {
75 	WARN_ON(!(k->flags & PF_KTHREAD));
76 	return (__force void *)k->set_child_tid;
77 }
78 
79 /*
80  * Variant of to_kthread() that doesn't assume @p is a kthread.
81  *
82  * Per construction, when:
83  *
84  *   (p->flags & PF_KTHREAD) && p->set_child_tid
85  *
86  * the task is both a kthread and struct kthread is persistent. However,
87  * PF_KTHREAD on its own is not; kernel_thread() can exec() (see umh.c and
88  * begin_new_exec()).
89  */
90 static inline struct kthread *__to_kthread(struct task_struct *p)
91 {
92 	void *kthread = (__force void *)p->set_child_tid;
93 	if (kthread && !(p->flags & PF_KTHREAD))
94 		kthread = NULL;
95 	return kthread;
96 }
97 
98 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
99 {
100 	struct kthread *kthread = to_kthread(tsk);
101 
102 	if (!kthread || !kthread->full_name) {
103 		__get_task_comm(buf, buf_size, tsk);
104 		return;
105 	}
106 
107 	strscpy_pad(buf, kthread->full_name, buf_size);
108 }
109 
110 void set_kthread_struct(struct task_struct *p)
111 {
112 	struct kthread *kthread;
113 
114 	if (__to_kthread(p))
115 		return;
116 
117 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
118 	/*
119 	 * We abuse ->set_child_tid to avoid adding a new member and because
120 	 * it can't be wrongly copied by copy_process(). We also rely on the
121 	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
122 	 */
123 	p->set_child_tid = (__force void __user *)kthread;
124 }
125 
126 void free_kthread_struct(struct task_struct *k)
127 {
128 	struct kthread *kthread;
129 
130 	/*
131 	 * Can be NULL if this kthread was created by kernel_thread()
132 	 * or if the kzalloc() in set_kthread_struct() failed.
133 	 */
134 	kthread = to_kthread(k);
135 	if (!kthread)
136 		return;
137 
138 #ifdef CONFIG_BLK_CGROUP
139 	WARN_ON_ONCE(kthread->blkcg_css);
140 #endif
141 	kfree(kthread->full_name);
142 	kfree(kthread);
143 }
144 
145 /**
146  * kthread_should_stop - should this kthread return now?
147  *
148  * When someone calls kthread_stop() on your kthread, it will be woken
149  * and this will return true.  You should then return, and your return
150  * value will be passed through to kthread_stop().
151  */
152 bool kthread_should_stop(void)
153 {
154 	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
155 }
156 EXPORT_SYMBOL(kthread_should_stop);
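
/*
 * Minimal usage sketch (illustrative only, not part of this file; the thread
 * function, do_one_unit_of_work() and the one-second sleep are hypothetical):
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_one_unit_of_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * The value returned by the thread function is handed back to kthread_stop().
 */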
157 
158 bool __kthread_should_park(struct task_struct *k)
159 {
160 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
161 }
162 EXPORT_SYMBOL_GPL(__kthread_should_park);
163 
164 /**
165  * kthread_should_park - should this kthread park now?
166  *
167  * When someone calls kthread_park() on your kthread, it will be woken
168  * and this will return true.  You should then do the necessary
169  * cleanup and call kthread_parkme().
170  *
171  * Similar to kthread_should_stop(), but this keeps the thread alive
172  * and in a park position. kthread_unpark() "restarts" the thread and
173  * calls the thread function again.
174  */
175 bool kthread_should_park(void)
176 {
177 	return __kthread_should_park(current);
178 }
179 EXPORT_SYMBOL_GPL(kthread_should_park);
180 
181 /**
182  * kthread_freezable_should_stop - should this freezable kthread return now?
183  * @was_frozen: optional out parameter, indicates whether %current was frozen
184  *
185  * kthread_should_stop() for freezable kthreads, which will enter
186  * refrigerator if necessary.  This function is safe from kthread_stop() /
187  * freezer deadlock and freezable kthreads should use this function instead
188  * of calling try_to_freeze() directly.
189  */
190 bool kthread_freezable_should_stop(bool *was_frozen)
191 {
192 	bool frozen = false;
193 
194 	might_sleep();
195 
196 	if (unlikely(freezing(current)))
197 		frozen = __refrigerator(true);
198 
199 	if (was_frozen)
200 		*was_frozen = frozen;
201 
202 	return kthread_should_stop();
203 }
204 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
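
/*
 * Sketch of a freezable kthread main loop (illustrative only; the thread
 * function and the helpers resync_state_after_thaw()/do_one_unit_of_work()
 * are hypothetical). Kthreads are not freezable by default, hence the
 * set_freezable() call:
 *
 *	static int my_freezable_thread_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				resync_state_after_thaw(data);
 *			do_one_unit_of_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */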
205 
206 /**
207  * kthread_func - return the function specified on kthread creation
208  * @task: kthread task in question
209  *
210  * Returns NULL if the task is not a kthread.
211  */
212 void *kthread_func(struct task_struct *task)
213 {
214 	struct kthread *kthread = __to_kthread(task);
215 	if (kthread)
216 		return kthread->threadfn;
217 	return NULL;
218 }
219 EXPORT_SYMBOL_GPL(kthread_func);
220 
221 /**
222  * kthread_data - return data value specified on kthread creation
223  * @task: kthread task in question
224  *
225  * Return the data value specified when kthread @task was created.
226  * The caller is responsible for ensuring the validity of @task when
227  * calling this function.
228  */
229 void *kthread_data(struct task_struct *task)
230 {
231 	return to_kthread(task)->data;
232 }
233 EXPORT_SYMBOL_GPL(kthread_data);
234 
235 /**
236  * kthread_probe_data - speculative version of kthread_data()
237  * @task: possible kthread task in question
238  *
239  * @task could be a kthread task.  Return the data value specified when it
240  * was created if accessible.  If @task isn't a kthread task or its data is
241  * inaccessible for any reason, %NULL is returned.  This function requires
242  * that @task itself is safe to dereference.
243  */
244 void *kthread_probe_data(struct task_struct *task)
245 {
246 	struct kthread *kthread = __to_kthread(task);
247 	void *data = NULL;
248 
249 	if (kthread)
250 		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
251 	return data;
252 }
253 
254 static void __kthread_parkme(struct kthread *self)
255 {
256 	for (;;) {
257 		/*
258 		 * TASK_PARKED is a special state; we must serialize against
259 		 * possible pending wakeups to avoid store-store collisions on
260 		 * task->state.
261 		 *
262 		 * Such a collision might possibly result in the task state
263 		 * changing from TASK_PARKED and us failing the
264 		 * wait_task_inactive() in kthread_park().
265 		 */
266 		set_special_state(TASK_PARKED);
267 		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
268 			break;
269 
270 		/*
271 		 * Thread is going to call schedule(), do not preempt it,
272 		 * or the caller of kthread_park() may spend more time in
273 		 * wait_task_inactive().
274 		 */
275 		preempt_disable();
276 		complete(&self->parked);
277 		schedule_preempt_disabled();
278 		preempt_enable();
279 	}
280 	__set_current_state(TASK_RUNNING);
281 }
282 
283 void kthread_parkme(void)
284 {
285 	__kthread_parkme(to_kthread(current));
286 }
287 EXPORT_SYMBOL_GPL(kthread_parkme);
288 
289 static int kthread(void *_create)
290 {
291 	static const struct sched_param param = { .sched_priority = 0 };
292 	/* Copy data: it's on kthread's stack */
293 	struct kthread_create_info *create = _create;
294 	int (*threadfn)(void *data) = create->threadfn;
295 	void *data = create->data;
296 	struct completion *done;
297 	struct kthread *self;
298 	int ret;
299 
300 	set_kthread_struct(current);
301 	self = to_kthread(current);
302 
303 	/* If user was SIGKILLed, I release the structure. */
304 	done = xchg(&create->done, NULL);
305 	if (!done) {
306 		kfree(create);
307 		do_exit(-EINTR);
308 	}
309 
310 	if (!self) {
311 		create->result = ERR_PTR(-ENOMEM);
312 		complete(done);
313 		do_exit(-ENOMEM);
314 	}
315 
316 	self->threadfn = threadfn;
317 	self->data = data;
318 	init_completion(&self->exited);
319 	init_completion(&self->parked);
320 	current->vfork_done = &self->exited;
321 
322 	/*
323 	 * The new thread inherited kthreadd's priority and CPU mask. Reset
324 	 * back to default in case they have been changed.
325 	 */
326 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
327 	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_KTHREAD));
328 
329 	/* OK, tell user we're spawned, wait for stop or wakeup */
330 	__set_current_state(TASK_UNINTERRUPTIBLE);
331 	create->result = current;
332 	/*
333 	 * Thread is going to call schedule(), do not preempt it,
334 	 * or the creator may spend more time in wait_task_inactive().
335 	 */
336 	preempt_disable();
337 	complete(done);
338 	schedule_preempt_disabled();
339 	preempt_enable();
340 
341 	ret = -EINTR;
342 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
343 		cgroup_kthread_ready();
344 		__kthread_parkme(self);
345 		ret = threadfn(data);
346 	}
347 	do_exit(ret);
348 }
349 
350 /* called from kernel_clone() to get node information for the task about to be created */
351 int tsk_fork_get_node(struct task_struct *tsk)
352 {
353 #ifdef CONFIG_NUMA
354 	if (tsk == kthreadd_task)
355 		return tsk->pref_node_fork;
356 #endif
357 	return NUMA_NO_NODE;
358 }
359 
360 static void create_kthread(struct kthread_create_info *create)
361 {
362 	int pid;
363 
364 #ifdef CONFIG_NUMA
365 	current->pref_node_fork = create->node;
366 #endif
367 	/* We want our own signal handler (we take no signals by default). */
368 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
369 	if (pid < 0) {
370 		/* If user was SIGKILLed, I release the structure. */
371 		struct completion *done = xchg(&create->done, NULL);
372 
373 		if (!done) {
374 			kfree(create);
375 			return;
376 		}
377 		create->result = ERR_PTR(pid);
378 		complete(done);
379 	}
380 }
381 
382 static __printf(4, 0)
383 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
384 						    void *data, int node,
385 						    const char namefmt[],
386 						    va_list args)
387 {
388 	DECLARE_COMPLETION_ONSTACK(done);
389 	struct task_struct *task;
390 	struct kthread_create_info *create = kmalloc(sizeof(*create),
391 						     GFP_KERNEL);
392 
393 	if (!create)
394 		return ERR_PTR(-ENOMEM);
395 	create->threadfn = threadfn;
396 	create->data = data;
397 	create->node = node;
398 	create->done = &done;
399 
400 	spin_lock(&kthread_create_lock);
401 	list_add_tail(&create->list, &kthread_create_list);
402 	spin_unlock(&kthread_create_lock);
403 
404 	wake_up_process(kthreadd_task);
405 	/*
406 	 * Wait for completion in killable state, for I might be chosen by
407 	 * the OOM killer while kthreadd is trying to allocate memory for
408 	 * the new kernel thread.
409 	 */
410 	if (unlikely(wait_for_completion_killable(&done))) {
411 		/*
412 		 * If I was SIGKILLed before kthreadd (or new kernel thread)
413 		 * calls complete(), leave the cleanup of this structure to
414 		 * that thread.
415 		 */
416 		if (xchg(&create->done, NULL))
417 			return ERR_PTR(-EINTR);
418 		/*
419 		 * kthreadd (or new kernel thread) will call complete()
420 		 * shortly.
421 		 */
422 		wait_for_completion(&done);
423 	}
424 	task = create->result;
425 	if (!IS_ERR(task)) {
426 		char name[TASK_COMM_LEN];
427 		va_list aq;
428 		int len;
429 
430 		/*
431 		 * task is already visible to other tasks, so updating
432 		 * COMM must be protected.
433 		 */
434 		va_copy(aq, args);
435 		len = vsnprintf(name, sizeof(name), namefmt, aq);
436 		va_end(aq);
437 		if (len >= TASK_COMM_LEN) {
438 			struct kthread *kthread = to_kthread(task);
439 
440 			/* leave it truncated when out of memory. */
441 			kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
442 		}
443 		set_task_comm(task, name);
444 	}
445 	kfree(create);
446 	return task;
447 }
448 
449 /**
450  * kthread_create_on_node - create a kthread.
451  * @threadfn: the function to run until signal_pending(current).
452  * @data: data ptr for @threadfn.
453  * @node: task and thread structures for the thread are allocated on this node
454  * @namefmt: printf-style name for the thread.
455  *
456  * Description: This helper function creates and names a kernel
457  * thread.  The thread will be stopped: use wake_up_process() to start
458  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
459  * is affine to all CPUs.
460  *
461  * If the thread is going to be bound to a particular cpu, give that cpu's
462  * node in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
463  * When woken, the thread will run @threadfn() with @data as its
464  * argument. @threadfn() can either return directly if it is a
465  * standalone thread for which no one will call kthread_stop(), or
466  * return when 'kthread_should_stop()' is true (which means
467  * kthread_stop() has been called).  The return value should be zero
468  * or a negative error number; it will be passed to kthread_stop().
469  *
470  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
471  */
472 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
473 					   void *data, int node,
474 					   const char namefmt[],
475 					   ...)
476 {
477 	struct task_struct *task;
478 	va_list args;
479 
480 	va_start(args, namefmt);
481 	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
482 	va_end(args);
483 
484 	return task;
485 }
486 EXPORT_SYMBOL(kthread_create_on_node);
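
/*
 * Typical creation sequence (illustrative only; my_thread_fn, my_data and the
 * name/id are hypothetical). The kthread_run() macro in <linux/kthread.h>
 * combines the create and wake_up_process() steps:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				     "my_thread/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	err = kthread_stop(tsk);
 */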
487 
488 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
489 {
490 	unsigned long flags;
491 
492 	if (!wait_task_inactive(p, state)) {
493 		WARN_ON(1);
494 		return;
495 	}
496 
497 	/* It's safe because the task is inactive. */
498 	raw_spin_lock_irqsave(&p->pi_lock, flags);
499 	do_set_cpus_allowed(p, mask);
500 	p->flags |= PF_NO_SETAFFINITY;
501 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
502 }
503 
504 static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
505 {
506 	__kthread_bind_mask(p, cpumask_of(cpu), state);
507 }
508 
509 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
510 {
511 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
512 }
513 
514 /**
515  * kthread_bind - bind a just-created kthread to a cpu.
516  * @p: thread created by kthread_create().
517  * @cpu: cpu (might not be online, must be possible) for @k to run on.
518  *
519  * Description: This function is equivalent to set_cpus_allowed(),
520  * except that @cpu doesn't need to be online, and the thread must be
521  * stopped (i.e., just returned from kthread_create()).
522  */
523 void kthread_bind(struct task_struct *p, unsigned int cpu)
524 {
525 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
526 }
527 EXPORT_SYMBOL(kthread_bind);
528 
529 /**
530  * kthread_create_on_cpu - Create a cpu bound kthread
531  * @threadfn: the function to run until signal_pending(current).
532  * @data: data ptr for @threadfn.
533  * @cpu: The cpu on which the thread should be bound.
534  * @namefmt: printf-style name for the thread. Format is restricted
535  *	     to "name.*%u". Code fills in cpu number.
536  *
537  * Description: This helper function creates and names a kernel thread bound to @cpu.
538  */
539 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
540 					  void *data, unsigned int cpu,
541 					  const char *namefmt)
542 {
543 	struct task_struct *p;
544 
545 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
546 				   cpu);
547 	if (IS_ERR(p))
548 		return p;
549 	kthread_bind(p, cpu);
550 	/* CPU hotplug needs to bind it once again when unparking the thread. */
551 	to_kthread(p)->cpu = cpu;
552 	return p;
553 }
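
/*
 * Usage sketch (illustrative only; my_thread_fn and my_data are hypothetical).
 * The new thread is bound to @cpu but not yet running; wake it explicitly:
 *
 *	p = kthread_create_on_cpu(my_thread_fn, my_data, cpu, "my_worker/%u");
 *	if (!IS_ERR(p))
 *		wake_up_process(p);
 */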
554 
555 void kthread_set_per_cpu(struct task_struct *k, int cpu)
556 {
557 	struct kthread *kthread = to_kthread(k);
558 	if (!kthread)
559 		return;
560 
561 	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
562 
563 	if (cpu < 0) {
564 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
565 		return;
566 	}
567 
568 	kthread->cpu = cpu;
569 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
570 }
571 
572 bool kthread_is_per_cpu(struct task_struct *p)
573 {
574 	struct kthread *kthread = __to_kthread(p);
575 	if (!kthread)
576 		return false;
577 
578 	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
579 }
580 
581 /**
582  * kthread_unpark - unpark a thread created by kthread_create().
583  * @k:		thread created by kthread_create().
584  *
585  * Sets kthread_should_park() for @k to return false and wakes it.
586  * If the thread is marked percpu then it's bound to the cpu again
587  * before it is woken.
588  */
589 void kthread_unpark(struct task_struct *k)
590 {
591 	struct kthread *kthread = to_kthread(k);
592 
593 	/*
594 	 * Newly created kthread was parked when the CPU was offline.
595 	 * The binding was lost and we need to set it again.
596 	 */
597 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
598 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
599 
600 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
601 	/*
602 	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
603 	 */
604 	wake_up_state(k, TASK_PARKED);
605 }
606 EXPORT_SYMBOL_GPL(kthread_unpark);
607 
608 /**
609  * kthread_park - park a thread created by kthread_create().
610  * @k: thread created by kthread_create().
611  *
612  * Sets kthread_should_park() for @k to return true, wakes it, and
613  * waits for it to park. This can also be called after kthread_create()
614  * instead of calling wake_up_process(): the thread will park without
615  * calling threadfn().
616  *
617  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
618  * If called by the kthread itself, just the park bit is set.
619  */
620 int kthread_park(struct task_struct *k)
621 {
622 	struct kthread *kthread = to_kthread(k);
623 
624 	if (WARN_ON(k->flags & PF_EXITING))
625 		return -ENOSYS;
626 
627 	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
628 		return -EBUSY;
629 
630 	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
631 	if (k != current) {
632 		wake_up_process(k);
633 		/*
634 		 * Wait for __kthread_parkme() to complete(); this means we
635 		 * _will_ have TASK_PARKED and are about to call schedule().
636 		 */
637 		wait_for_completion(&kthread->parked);
638 		/*
639 		 * Now wait for that schedule() to complete and the task to
640 		 * get scheduled out.
641 		 */
642 		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
643 	}
644 
645 	return 0;
646 }
647 EXPORT_SYMBOL_GPL(kthread_park);
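
/*
 * Park/unpark sketch (illustrative only; tsk is a previously created kthread,
 * and the hotplug context and do_one_unit_of_work() are hypothetical). The
 * thread cooperates by checking kthread_should_park() and calling
 * kthread_parkme():
 *
 *	// controller side, e.g. before taking a CPU down:
 *	err = kthread_park(tsk);
 *	...
 *	// later, when the thread may run again:
 *	kthread_unpark(tsk);
 *
 *	// inside the thread function:
 *	while (!kthread_should_stop()) {
 *		if (kthread_should_park())
 *			kthread_parkme();
 *		do_one_unit_of_work(data);
 *	}
 */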
648 
649 /**
650  * kthread_stop - stop a thread created by kthread_create().
651  * @k: thread created by kthread_create().
652  *
653  * Sets kthread_should_stop() for @k to return true, wakes it, and
654  * waits for it to exit. This can also be called after kthread_create()
655  * instead of calling wake_up_process(): the thread will exit without
656  * calling threadfn().
657  *
658  * If threadfn() may call do_exit() itself, the caller must ensure
659  * task_struct can't go away.
660  *
661  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
662  * was never called.
663  */
664 int kthread_stop(struct task_struct *k)
665 {
666 	struct kthread *kthread;
667 	int ret;
668 
669 	trace_sched_kthread_stop(k);
670 
671 	get_task_struct(k);
672 	kthread = to_kthread(k);
673 	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
674 	kthread_unpark(k);
675 	wake_up_process(k);
676 	wait_for_completion(&kthread->exited);
677 	ret = k->exit_code;
678 	put_task_struct(k);
679 
680 	trace_sched_kthread_stop_ret(ret);
681 	return ret;
682 }
683 EXPORT_SYMBOL(kthread_stop);
684 
685 int kthreadd(void *unused)
686 {
687 	struct task_struct *tsk = current;
688 
689 	/* Setup a clean context for our children to inherit. */
690 	set_task_comm(tsk, "kthreadd");
691 	ignore_signals(tsk);
692 	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
693 	set_mems_allowed(node_states[N_MEMORY]);
694 
695 	current->flags |= PF_NOFREEZE;
696 	cgroup_init_kthreadd();
697 
698 	for (;;) {
699 		set_current_state(TASK_INTERRUPTIBLE);
700 		if (list_empty(&kthread_create_list))
701 			schedule();
702 		__set_current_state(TASK_RUNNING);
703 
704 		spin_lock(&kthread_create_lock);
705 		while (!list_empty(&kthread_create_list)) {
706 			struct kthread_create_info *create;
707 
708 			create = list_entry(kthread_create_list.next,
709 					    struct kthread_create_info, list);
710 			list_del_init(&create->list);
711 			spin_unlock(&kthread_create_lock);
712 
713 			create_kthread(create);
714 
715 			spin_lock(&kthread_create_lock);
716 		}
717 		spin_unlock(&kthread_create_lock);
718 	}
719 
720 	return 0;
721 }
722 
723 void __kthread_init_worker(struct kthread_worker *worker,
724 				const char *name,
725 				struct lock_class_key *key)
726 {
727 	memset(worker, 0, sizeof(struct kthread_worker));
728 	raw_spin_lock_init(&worker->lock);
729 	lockdep_set_class_and_name(&worker->lock, key, name);
730 	INIT_LIST_HEAD(&worker->work_list);
731 	INIT_LIST_HEAD(&worker->delayed_work_list);
732 }
733 EXPORT_SYMBOL_GPL(__kthread_init_worker);
734 
735 /**
736  * kthread_worker_fn - kthread function to process kthread_worker
737  * @worker_ptr: pointer to initialized kthread_worker
738  *
739  * This function implements the main cycle of a kthread worker. It processes
740  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
741  * is empty.
742  *
743  * The works must not hold any locks or keep preemption or interrupts disabled
744  * when they finish. There is a defined safe point for freezing after one work
745  * finishes and before a new one is started.
746  *
747  * Also, a work must not be handled by more than one worker at the same time;
748  * see also kthread_queue_work().
749  */
750 int kthread_worker_fn(void *worker_ptr)
751 {
752 	struct kthread_worker *worker = worker_ptr;
753 	struct kthread_work *work;
754 
755 	/*
756 	 * FIXME: Update the check and remove the assignment when all kthread
757 	 * worker users are created using kthread_create_worker*() functions.
758 	 */
759 	WARN_ON(worker->task && worker->task != current);
760 	worker->task = current;
761 
762 	if (worker->flags & KTW_FREEZABLE)
763 		set_freezable();
764 
765 repeat:
766 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
767 
768 	if (kthread_should_stop()) {
769 		__set_current_state(TASK_RUNNING);
770 		raw_spin_lock_irq(&worker->lock);
771 		worker->task = NULL;
772 		raw_spin_unlock_irq(&worker->lock);
773 		return 0;
774 	}
775 
776 	work = NULL;
777 	raw_spin_lock_irq(&worker->lock);
778 	if (!list_empty(&worker->work_list)) {
779 		work = list_first_entry(&worker->work_list,
780 					struct kthread_work, node);
781 		list_del_init(&work->node);
782 	}
783 	worker->current_work = work;
784 	raw_spin_unlock_irq(&worker->lock);
785 
786 	if (work) {
787 		kthread_work_func_t func = work->func;
788 		__set_current_state(TASK_RUNNING);
789 		trace_sched_kthread_work_execute_start(work);
790 		work->func(work);
791 		/*
792 		 * Avoid dereferencing work after this point.  The trace
793 		 * event only cares about the address.
794 		 */
795 		trace_sched_kthread_work_execute_end(work, func);
796 	} else if (!freezing(current))
797 		schedule();
798 
799 	try_to_freeze();
800 	cond_resched();
801 	goto repeat;
802 }
803 EXPORT_SYMBOL_GPL(kthread_worker_fn);
804 
805 static __printf(3, 0) struct kthread_worker *
806 __kthread_create_worker(int cpu, unsigned int flags,
807 			const char namefmt[], va_list args)
808 {
809 	struct kthread_worker *worker;
810 	struct task_struct *task;
811 	int node = NUMA_NO_NODE;
812 
813 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
814 	if (!worker)
815 		return ERR_PTR(-ENOMEM);
816 
817 	kthread_init_worker(worker);
818 
819 	if (cpu >= 0)
820 		node = cpu_to_node(cpu);
821 
822 	task = __kthread_create_on_node(kthread_worker_fn, worker,
823 						node, namefmt, args);
824 	if (IS_ERR(task))
825 		goto fail_task;
826 
827 	if (cpu >= 0)
828 		kthread_bind(task, cpu);
829 
830 	worker->flags = flags;
831 	worker->task = task;
832 	wake_up_process(task);
833 	return worker;
834 
835 fail_task:
836 	kfree(worker);
837 	return ERR_CAST(task);
838 }
839 
840 /**
841  * kthread_create_worker - create a kthread worker
842  * @flags: flags modifying the default behavior of the worker
843  * @namefmt: printf-style name for the kthread worker (task).
844  *
845  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
846  * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
847  * when the worker was SIGKILLed.
848  */
849 struct kthread_worker *
850 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
851 {
852 	struct kthread_worker *worker;
853 	va_list args;
854 
855 	va_start(args, namefmt);
856 	worker = __kthread_create_worker(-1, flags, namefmt, args);
857 	va_end(args);
858 
859 	return worker;
860 }
861 EXPORT_SYMBOL(kthread_create_worker);
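
/*
 * Worker usage sketch (illustrative only; my_work_fn, the worker name and the
 * embedding struct my_ctx are hypothetical):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *		// process ctx
 *	}
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);
 */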
862 
863 /**
864  * kthread_create_worker_on_cpu - create a kthread worker and bind it
865  *	to a given CPU and the associated NUMA node.
866  * @cpu: CPU number
867  * @flags: flags modifying the default behavior of the worker
868  * @namefmt: printf-style name for the kthread worker (task).
869  *
870  * Use a valid CPU number if you want to bind the kthread worker
871  * to the given CPU and the associated NUMA node.
872  *
873  * A good practice is to also add the cpu number into the worker name.
874  * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
875  *
876  * CPU hotplug:
877  * The kthread worker API is simple and generic. It just provides a way
878  * to create, use, and destroy workers.
879  *
880  * It is up to the API user how to handle CPU hotplug. They have to decide
881  * how to handle pending work items, prevent queuing new ones, and
882  * restore the functionality when the CPU goes offline and comes back
883  * online. There are a few catches:
884  *
885  *    - The worker's CPU affinity is lost when it is scheduled on an offline CPU.
886  *
887  *    - The worker might not exist if the CPU was offline when the user
888  *      created the workers.
889  *
890  * Good practice is to implement two CPU hotplug callbacks and to
891  * destroy/create the worker when the CPU goes down/up.
892  *
893  * Return:
894  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
895  * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
896  * when the worker was SIGKILLed.
897  */
898 struct kthread_worker *
899 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
900 			     const char namefmt[], ...)
901 {
902 	struct kthread_worker *worker;
903 	va_list args;
904 
905 	va_start(args, namefmt);
906 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
907 	va_end(args);
908 
909 	return worker;
910 }
911 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
912 
913 /*
914  * Returns true when the work could not be queued at the moment.
915  * It happens when it is already pending in a worker list
916  * or when it is being canceled.
917  */
918 static inline bool queuing_blocked(struct kthread_worker *worker,
919 				   struct kthread_work *work)
920 {
921 	lockdep_assert_held(&worker->lock);
922 
923 	return !list_empty(&work->node) || work->canceling;
924 }
925 
926 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
927 					     struct kthread_work *work)
928 {
929 	lockdep_assert_held(&worker->lock);
930 	WARN_ON_ONCE(!list_empty(&work->node));
931 	/* Do not use a work with >1 worker, see kthread_queue_work() */
932 	WARN_ON_ONCE(work->worker && work->worker != worker);
933 }
934 
935 /* insert @work before @pos in @worker */
936 static void kthread_insert_work(struct kthread_worker *worker,
937 				struct kthread_work *work,
938 				struct list_head *pos)
939 {
940 	kthread_insert_work_sanity_check(worker, work);
941 
942 	trace_sched_kthread_work_queue_work(worker, work);
943 
944 	list_add_tail(&work->node, pos);
945 	work->worker = worker;
946 	if (!worker->current_work && likely(worker->task))
947 		wake_up_process(worker->task);
948 }
949 
950 /**
951  * kthread_queue_work - queue a kthread_work
952  * @worker: target kthread_worker
953  * @work: kthread_work to queue
954  *
955  * Queue @work to @worker for async execution.  @worker must have
956  * been created with kthread_create_worker().  Returns %true if @work
957  * was successfully queued, %false if it was already pending.
958  *
959  * Reinitialize the work if it needs to be used by another worker.
960  * For example, when the worker was stopped and started again.
961  */
962 bool kthread_queue_work(struct kthread_worker *worker,
963 			struct kthread_work *work)
964 {
965 	bool ret = false;
966 	unsigned long flags;
967 
968 	raw_spin_lock_irqsave(&worker->lock, flags);
969 	if (!queuing_blocked(worker, work)) {
970 		kthread_insert_work(worker, work, &worker->work_list);
971 		ret = true;
972 	}
973 	raw_spin_unlock_irqrestore(&worker->lock, flags);
974 	return ret;
975 }
976 EXPORT_SYMBOL_GPL(kthread_queue_work);
977 
978 /**
979  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
980  *	delayed work when the timer expires.
981  * @t: pointer to the expired timer
982  *
983  * The format of the function is defined by struct timer_list.
984  * It should be called from an irq-safe timer with irqs already disabled.
985  */
986 void kthread_delayed_work_timer_fn(struct timer_list *t)
987 {
988 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
989 	struct kthread_work *work = &dwork->work;
990 	struct kthread_worker *worker = work->worker;
991 	unsigned long flags;
992 
993 	/*
994 	 * This might happen when a pending work is reinitialized.
995 	 * It means that the work is being used in a wrong way.
996 	 */
997 	if (WARN_ON_ONCE(!worker))
998 		return;
999 
1000 	raw_spin_lock_irqsave(&worker->lock, flags);
1001 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1002 	WARN_ON_ONCE(work->worker != worker);
1003 
1004 	/* Move the work from worker->delayed_work_list. */
1005 	WARN_ON_ONCE(list_empty(&work->node));
1006 	list_del_init(&work->node);
1007 	if (!work->canceling)
1008 		kthread_insert_work(worker, work, &worker->work_list);
1009 
1010 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1011 }
1012 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1013 
1014 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1015 					 struct kthread_delayed_work *dwork,
1016 					 unsigned long delay)
1017 {
1018 	struct timer_list *timer = &dwork->timer;
1019 	struct kthread_work *work = &dwork->work;
1020 
1021 	WARN_ON_FUNCTION_MISMATCH(timer->function,
1022 				  kthread_delayed_work_timer_fn);
1023 
1024 	/*
1025 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1026 	 * both optimization and correctness.  The earliest @timer can
1027 	 * expire is on the closest next tick and delayed_work users depend
1028 	 * on that there's no such delay when @delay is 0.
1029 	 */
1030 	if (!delay) {
1031 		kthread_insert_work(worker, work, &worker->work_list);
1032 		return;
1033 	}
1034 
1035 	/* Be paranoid and try to detect possible races already now. */
1036 	kthread_insert_work_sanity_check(worker, work);
1037 
1038 	list_add(&work->node, &worker->delayed_work_list);
1039 	work->worker = worker;
1040 	timer->expires = jiffies + delay;
1041 	add_timer(timer);
1042 }
1043 
1044 /**
1045  * kthread_queue_delayed_work - queue the associated kthread work
1046  *	after a delay.
1047  * @worker: target kthread_worker
1048  * @dwork: kthread_delayed_work to queue
1049  * @delay: number of jiffies to wait before queuing
1050  *
1051  * If the work has not been pending, this starts a timer that will queue
1052  * the work after the given @delay. If @delay is zero, it queues the
1053  * work immediately.
1054  *
1055  * Return: %false if the @work has already been pending, meaning that
1056  * either the timer is running or the work is queued; it returns %true
1057  * otherwise.
1058  */
1059 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1060 				struct kthread_delayed_work *dwork,
1061 				unsigned long delay)
1062 {
1063 	struct kthread_work *work = &dwork->work;
1064 	unsigned long flags;
1065 	bool ret = false;
1066 
1067 	raw_spin_lock_irqsave(&worker->lock, flags);
1068 
1069 	if (!queuing_blocked(worker, work)) {
1070 		__kthread_queue_delayed_work(worker, dwork, delay);
1071 		ret = true;
1072 	}
1073 
1074 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1075 	return ret;
1076 }
1077 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
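
/*
 * Delayed work sketch (illustrative only; my_work_fn, worker and ctx are
 * hypothetical and assumed to be set up as in the kthread_create_worker()
 * example above):
 *
 *	kthread_init_delayed_work(&ctx->dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &ctx->dwork, msecs_to_jiffies(100));
 *
 * A later kthread_mod_delayed_work() call can move the expiry, and
 * kthread_cancel_delayed_work_sync() cancels it and waits for a running
 * instance to finish.
 */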
1078 
1079 struct kthread_flush_work {
1080 	struct kthread_work	work;
1081 	struct completion	done;
1082 };
1083 
1084 static void kthread_flush_work_fn(struct kthread_work *work)
1085 {
1086 	struct kthread_flush_work *fwork =
1087 		container_of(work, struct kthread_flush_work, work);
1088 	complete(&fwork->done);
1089 }
1090 
1091 /**
1092  * kthread_flush_work - flush a kthread_work
1093  * @work: work to flush
1094  *
1095  * If @work is queued or executing, wait for it to finish execution.
1096  */
1097 void kthread_flush_work(struct kthread_work *work)
1098 {
1099 	struct kthread_flush_work fwork = {
1100 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1101 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1102 	};
1103 	struct kthread_worker *worker;
1104 	bool noop = false;
1105 
1106 	worker = work->worker;
1107 	if (!worker)
1108 		return;
1109 
1110 	raw_spin_lock_irq(&worker->lock);
1111 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1112 	WARN_ON_ONCE(work->worker != worker);
1113 
1114 	if (!list_empty(&work->node))
1115 		kthread_insert_work(worker, &fwork.work, work->node.next);
1116 	else if (worker->current_work == work)
1117 		kthread_insert_work(worker, &fwork.work,
1118 				    worker->work_list.next);
1119 	else
1120 		noop = true;
1121 
1122 	raw_spin_unlock_irq(&worker->lock);
1123 
1124 	if (!noop)
1125 		wait_for_completion(&fwork.done);
1126 }
1127 EXPORT_SYMBOL_GPL(kthread_flush_work);
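
/*
 * Flush sketch (illustrative only; worker, ctx and my_work_fn() are
 * hypothetical). Flushing waits for a queued or executing work item to
 * finish, e.g. before consuming whatever it produced:
 *
 *	kthread_queue_work(worker, &ctx->work);
 *	...
 *	kthread_flush_work(&ctx->work);
 *	// results written by my_work_fn() into ctx are now stable
 */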
1128 
1129 /*
1130  * Make sure that the timer is neither set nor running and can no
1131  * longer manipulate the work's list_head.
1132  *
1133  * The function is called under worker->lock. The lock is temporarily
1134  * released but the timer can't be set again in the meantime.
1135  */
1136 static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1137 					      unsigned long *flags)
1138 {
1139 	struct kthread_delayed_work *dwork =
1140 		container_of(work, struct kthread_delayed_work, work);
1141 	struct kthread_worker *worker = work->worker;
1142 
1143 	/*
1144 	 * del_timer_sync() must be called to make sure that the timer
1145 	 * callback is not running. The lock must be temporarily released
1146 	 * to avoid a deadlock with the callback. In the meantime,
1147 	 * any queuing is blocked by setting the canceling counter.
1148 	 */
1149 	work->canceling++;
1150 	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1151 	del_timer_sync(&dwork->timer);
1152 	raw_spin_lock_irqsave(&worker->lock, *flags);
1153 	work->canceling--;
1154 }
1155 
1156 /*
1157  * This function removes the work from the worker queue.
1158  *
1159  * It is called under worker->lock. The caller must make sure that
1160  * the timer used by delayed work is not running, e.g. by calling
1161  * kthread_cancel_delayed_work_timer().
1162  *
1163  * The work might still be in use when this function finishes. See the
1164  * current_work processed by the worker.
1165  *
1166  * Return: %true if @work was pending and successfully canceled,
1167  *	%false if @work was not pending
1168  */
1169 static bool __kthread_cancel_work(struct kthread_work *work)
1170 {
1171 	/*
1172 	 * Try to remove the work from a worker list. It might either
1173 	 * be from worker->work_list or from worker->delayed_work_list.
1174 	 */
1175 	if (!list_empty(&work->node)) {
1176 		list_del_init(&work->node);
1177 		return true;
1178 	}
1179 
1180 	return false;
1181 }
1182 
1183 /**
1184  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1185  * @worker: kthread worker to use
1186  * @dwork: kthread delayed work to queue
1187  * @delay: number of jiffies to wait before queuing
1188  *
1189  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1190  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1191  * @work is guaranteed to be queued immediately.
1192  *
1193  * Return: %false if @dwork was idle and queued, %true otherwise.
1194  *
1195  * A special case is when the work is being canceled in parallel.
1196  * It might be caused either by the real kthread_cancel_delayed_work_sync()
1197  * or yet another kthread_mod_delayed_work() call. We let the other command
1198  * win and return %true here. The return value can be used for reference
1199  * counting and the number of queued works stays the same. Anyway, the caller
1200  * is supposed to synchronize these operations in a reasonable way.
1201  *
1202  * This function is safe to call from any context including IRQ handler.
1203  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1204  * for details.
1205  */
1206 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1207 			      struct kthread_delayed_work *dwork,
1208 			      unsigned long delay)
1209 {
1210 	struct kthread_work *work = &dwork->work;
1211 	unsigned long flags;
1212 	int ret;
1213 
1214 	raw_spin_lock_irqsave(&worker->lock, flags);
1215 
1216 	/* Do not bother with canceling when never queued. */
1217 	if (!work->worker) {
1218 		ret = false;
1219 		goto fast_queue;
1220 	}
1221 
1222 	/* Work must not be used with >1 worker, see kthread_queue_work() */
1223 	WARN_ON_ONCE(work->worker != worker);
1224 
1225 	/*
1226 	 * Temporarily cancel the work but do not fight with another command
1227 	 * that is canceling the work as well.
1228 	 *
1229 	 * It is a bit tricky because of possible races with another
1230 	 * mod_delayed_work() and cancel_delayed_work() callers.
1231 	 *
1232 	 * The timer must be canceled first because worker->lock is released
1233 	 * when doing so. But the work can be removed from the queue (list)
1234 	 * only when it can be queued again so that the return value can
1235 	 * be used for reference counting.
1236 	 */
1237 	kthread_cancel_delayed_work_timer(work, &flags);
1238 	if (work->canceling) {
1239 		/* The number of works in the queue does not change. */
1240 		ret = true;
1241 		goto out;
1242 	}
1243 	ret = __kthread_cancel_work(work);
1244 
1245 fast_queue:
1246 	__kthread_queue_delayed_work(worker, dwork, delay);
1247 out:
1248 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1249 	return ret;
1250 }
1251 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
1252 
1253 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1254 {
1255 	struct kthread_worker *worker = work->worker;
1256 	unsigned long flags;
1257 	int ret = false;
1258 
1259 	if (!worker)
1260 		goto out;
1261 
1262 	raw_spin_lock_irqsave(&worker->lock, flags);
1263 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1264 	WARN_ON_ONCE(work->worker != worker);
1265 
1266 	if (is_dwork)
1267 		kthread_cancel_delayed_work_timer(work, &flags);
1268 
1269 	ret = __kthread_cancel_work(work);
1270 
1271 	if (worker->current_work != work)
1272 		goto out_fast;
1273 
1274 	/*
1275 	 * The work is in progress and we need to wait with the lock released.
1276 	 * In the meantime, block any queuing by setting the canceling counter.
1277 	 */
1278 	work->canceling++;
1279 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1280 	kthread_flush_work(work);
1281 	raw_spin_lock_irqsave(&worker->lock, flags);
1282 	work->canceling--;
1283 
1284 out_fast:
1285 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1286 out:
1287 	return ret;
1288 }
1289 
1290 /**
1291  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1292  * @work: the kthread work to cancel
1293  *
1294  * Cancel @work and wait for its execution to finish.  This function
1295  * can be used even if the work re-queues itself. On return from this
1296  * function, @work is guaranteed to be not pending or executing on any CPU.
1297  *
1298  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1299  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1300  *
1301  * The caller must ensure that the worker on which @work was last
1302  * queued can't be destroyed before this function returns.
1303  *
1304  * Return: %true if @work was pending, %false otherwise.
1305  */
1306 bool kthread_cancel_work_sync(struct kthread_work *work)
1307 {
1308 	return __kthread_cancel_work_sync(work, false);
1309 }
1310 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1311 
1312 /**
1313  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1314  *	wait for it to finish.
1315  * @dwork: the kthread delayed work to cancel
1316  *
1317  * This is kthread_cancel_work_sync() for delayed works.
1318  *
1319  * Return: %true if @dwork was pending, %false otherwise.
1320  */
1321 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1322 {
1323 	return __kthread_cancel_work_sync(&dwork->work, true);
1324 }
1325 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1326 
1327 /**
1328  * kthread_flush_worker - flush all current works on a kthread_worker
1329  * @worker: worker to flush
1330  *
1331  * Wait until all currently executing or pending works on @worker are
1332  * finished.
1333  */
1334 void kthread_flush_worker(struct kthread_worker *worker)
1335 {
1336 	struct kthread_flush_work fwork = {
1337 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1338 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1339 	};
1340 
1341 	kthread_queue_work(worker, &fwork.work);
1342 	wait_for_completion(&fwork.done);
1343 }
1344 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1345 
1346 /**
1347  * kthread_destroy_worker - destroy a kthread worker
1348  * @worker: worker to be destroyed
1349  *
1350  * Flush and destroy @worker.  The simple flush is enough because the kthread
1351  * worker API is used only in trivial scenarios.  There are no multi-step state
1352  * machines needed.
1353  */
1354 void kthread_destroy_worker(struct kthread_worker *worker)
1355 {
1356 	struct task_struct *task;
1357 
1358 	task = worker->task;
1359 	if (WARN_ON(!task))
1360 		return;
1361 
1362 	kthread_flush_worker(worker);
1363 	kthread_stop(task);
1364 	WARN_ON(!list_empty(&worker->work_list));
1365 	kfree(worker);
1366 }
1367 EXPORT_SYMBOL(kthread_destroy_worker);
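
/*
 * Teardown sketch (illustrative only; worker and ctx are hypothetical).
 * Cancel pending or delayed items that must not run anymore, then destroy
 * the worker, which flushes whatever is still queued and stops the kthread:
 *
 *	kthread_cancel_delayed_work_sync(&ctx->dwork);
 *	kthread_cancel_work_sync(&ctx->work);
 *	kthread_destroy_worker(worker);
 */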
1368 
1369 /**
1370  * kthread_use_mm - make the calling kthread operate on an address space
1371  * @mm: address space to operate on
1372  */
1373 void kthread_use_mm(struct mm_struct *mm)
1374 {
1375 	struct mm_struct *active_mm;
1376 	struct task_struct *tsk = current;
1377 
1378 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1379 	WARN_ON_ONCE(tsk->mm);
1380 
1381 	task_lock(tsk);
1382 	/* Hold off tlb flush IPIs while switching mm's */
1383 	local_irq_disable();
1384 	active_mm = tsk->active_mm;
1385 	if (active_mm != mm) {
1386 		mmgrab(mm);
1387 		tsk->active_mm = mm;
1388 	}
1389 	tsk->mm = mm;
1390 	membarrier_update_current_mm(mm);
1391 	switch_mm_irqs_off(active_mm, mm, tsk);
1392 	local_irq_enable();
1393 	task_unlock(tsk);
1394 #ifdef finish_arch_post_lock_switch
1395 	finish_arch_post_lock_switch();
1396 #endif
1397 
1398 	/*
1399 	 * When a kthread starts operating on an address space, the loop
1400 	 * in membarrier_{private,global}_expedited() may not observe
1401 	 * the update to tsk->mm and thus not issue an IPI. Membarrier requires a
1402 	 * memory barrier after storing to tsk->mm, before accessing
1403 	 * user-space memory. A full memory barrier for membarrier
1404 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1405 	 * mmdrop(), or explicitly with smp_mb().
1406 	 */
1407 	if (active_mm != mm)
1408 		mmdrop(active_mm);
1409 	else
1410 		smp_mb();
1411 
1412 	to_kthread(tsk)->oldfs = force_uaccess_begin();
1413 }
1414 EXPORT_SYMBOL_GPL(kthread_use_mm);
1415 
1416 /**
1417  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1418  * @mm: address space to operate on
1419  */
1420 void kthread_unuse_mm(struct mm_struct *mm)
1421 {
1422 	struct task_struct *tsk = current;
1423 
1424 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1425 	WARN_ON_ONCE(!tsk->mm);
1426 
1427 	force_uaccess_end(to_kthread(tsk)->oldfs);
1428 
1429 	task_lock(tsk);
1430 	/*
1431 	 * When a kthread stops operating on an address space, the loop
1432 	 * in membarrier_{private,global}_expedited() may not observe
1433 	 * the update to tsk->mm and thus not issue an IPI. Membarrier requires a
1434 	 * memory barrier after accessing user-space memory, before
1435 	 * clearing tsk->mm.
1436 	 */
1437 	smp_mb__after_spinlock();
1438 	sync_mm_rss(mm);
1439 	local_irq_disable();
1440 	tsk->mm = NULL;
1441 	membarrier_update_current_mm(NULL);
1442 	/* active_mm is still 'mm' */
1443 	enter_lazy_tlb(mm, tsk);
1444 	local_irq_enable();
1445 	task_unlock(tsk);
1446 }
1447 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
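
/*
 * Address-space borrowing sketch (illustrative only; mm, uptr, val and ret
 * are hypothetical; the caller is assumed to already hold a reference on mm,
 * e.g. via mmget()):
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(&val, uptr, sizeof(val)))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */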
1448 
1449 #ifdef CONFIG_BLK_CGROUP
1450 /**
1451  * kthread_associate_blkcg - associate blkcg to current kthread
1452  * @css: the cgroup info
1453  *
1454  * Current thread must be a kthread. The thread is running jobs on behalf of
1455  * other threads. In some cases, we expect the jobs to use the cgroup info of
1456  * the original threads instead of that of the current thread. This function
1457  * stores the original thread's cgroup info in the current kthread context for later
1458  * retrieval.
1459  */
1460 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1461 {
1462 	struct kthread *kthread;
1463 
1464 	if (!(current->flags & PF_KTHREAD))
1465 		return;
1466 	kthread = to_kthread(current);
1467 	if (!kthread)
1468 		return;
1469 
1470 	if (kthread->blkcg_css) {
1471 		css_put(kthread->blkcg_css);
1472 		kthread->blkcg_css = NULL;
1473 	}
1474 	if (css) {
1475 		css_get(css);
1476 		kthread->blkcg_css = css;
1477 	}
1478 }
1479 EXPORT_SYMBOL(kthread_associate_blkcg);
1480 
1481 /**
1482  * kthread_blkcg - get associated blkcg css of current kthread
1483  *
1484  * Current thread must be a kthread.
1485  */
1486 struct cgroup_subsys_state *kthread_blkcg(void)
1487 {
1488 	struct kthread *kthread;
1489 
1490 	if (current->flags & PF_KTHREAD) {
1491 		kthread = to_kthread(current);
1492 		if (kthread)
1493 			return kthread->blkcg_css;
1494 	}
1495 	return NULL;
1496 }
1497 EXPORT_SYMBOL(kthread_blkcg);
1498 #endif
1499