xref: /openbmc/linux/kernel/kthread.c (revision be33db21)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /* Kernel thread helper functions.
31da177e4SLinus Torvalds  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
49bf5b9ebSChristoph Hellwig  *   Copyright (C) 2009 Red Hat, Inc.
51da177e4SLinus Torvalds  *
673c27992SEric W. Biederman  * Creation is done via kthreadd, so that we get a clean environment
71da177e4SLinus Torvalds  * even if we're invoked from userspace (think modprobe, hotplug cpu,
81da177e4SLinus Torvalds  * etc.).
91da177e4SLinus Torvalds  */
10ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
119bf5b9ebSChristoph Hellwig #include <linux/mm.h>
129bf5b9ebSChristoph Hellwig #include <linux/mmu_context.h>
131da177e4SLinus Torvalds #include <linux/sched.h>
149bf5b9ebSChristoph Hellwig #include <linux/sched/mm.h>
1529930025SIngo Molnar #include <linux/sched/task.h>
161da177e4SLinus Torvalds #include <linux/kthread.h>
171da177e4SLinus Torvalds #include <linux/completion.h>
181da177e4SLinus Torvalds #include <linux/err.h>
198af0c18aSSuren Baghdasaryan #include <linux/cgroup.h>
2058568d2aSMiao Xie #include <linux/cpuset.h>
211da177e4SLinus Torvalds #include <linux/unistd.h>
221da177e4SLinus Torvalds #include <linux/file.h>
239984de1aSPaul Gortmaker #include <linux/export.h>
2497d1f15bSArjan van de Ven #include <linux/mutex.h>
25b56c0d89STejun Heo #include <linux/slab.h>
26b56c0d89STejun Heo #include <linux/freezer.h>
27a74fb73cSAl Viro #include <linux/ptrace.h>
28cd42d559STejun Heo #include <linux/uaccess.h>
2998fa15f3SAnshuman Khandual #include <linux/numa.h>
309cc5b865SMarcelo Tosatti #include <linux/sched/isolation.h>
31ad8d75ffSSteven Rostedt #include <trace/events/sched.h>
321da177e4SLinus Torvalds 
339bf5b9ebSChristoph Hellwig 
3473c27992SEric W. Biederman static DEFINE_SPINLOCK(kthread_create_lock);
3573c27992SEric W. Biederman static LIST_HEAD(kthread_create_list);
3673c27992SEric W. Biederman struct task_struct *kthreadd_task;
371da177e4SLinus Torvalds 
381da177e4SLinus Torvalds struct kthread_create_info
391da177e4SLinus Torvalds {
4073c27992SEric W. Biederman 	/* Information passed to kthread() from kthreadd. */
4173e0c116SMike Christie 	char *full_name;
421da177e4SLinus Torvalds 	int (*threadfn)(void *data);
431da177e4SLinus Torvalds 	void *data;
44207205a2SEric Dumazet 	int node;
451da177e4SLinus Torvalds 
4673c27992SEric W. Biederman 	/* Result passed back to kthread_create() from kthreadd. */
471da177e4SLinus Torvalds 	struct task_struct *result;
48786235eeSTetsuo Handa 	struct completion *done;
4965f27f38SDavid Howells 
5073c27992SEric W. Biederman 	struct list_head list;
511da177e4SLinus Torvalds };
521da177e4SLinus Torvalds 
5363706172SOleg Nesterov struct kthread {
542a1d4460SThomas Gleixner 	unsigned long flags;
552a1d4460SThomas Gleixner 	unsigned int cpu;
566b124879SEric W. Biederman 	int result;
5752782c92SJ. Bruce Fields 	int (*threadfn)(void *);
5882805ab7STejun Heo 	void *data;
592a1d4460SThomas Gleixner 	struct completion parked;
6063706172SOleg Nesterov 	struct completion exited;
610b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP
6205e3db95SShaohua Li 	struct cgroup_subsys_state *blkcg_css;
6305e3db95SShaohua Li #endif
64d6986ce2SYafang Shao 	/* To store the full name if task comm is truncated. */
65d6986ce2SYafang Shao 	char *full_name;
661da177e4SLinus Torvalds };
671da177e4SLinus Torvalds 
682a1d4460SThomas Gleixner enum KTHREAD_BITS {
692a1d4460SThomas Gleixner 	KTHREAD_IS_PER_CPU = 0,
702a1d4460SThomas Gleixner 	KTHREAD_SHOULD_STOP,
712a1d4460SThomas Gleixner 	KTHREAD_SHOULD_PARK,
722a1d4460SThomas Gleixner };
732a1d4460SThomas Gleixner 
744ecdafc8SOleg Nesterov static inline struct kthread *to_kthread(struct task_struct *k)
754ecdafc8SOleg Nesterov {
761da5c46fSOleg Nesterov 	WARN_ON(!(k->flags & PF_KTHREAD));
77e32cf5dfSEric W. Biederman 	return k->worker_private;
784ecdafc8SOleg Nesterov }
794ecdafc8SOleg Nesterov 
803a7956e2SPeter Zijlstra /*
813a7956e2SPeter Zijlstra  * Variant of to_kthread() that doesn't assume @p is a kthread.
823a7956e2SPeter Zijlstra  *
833a7956e2SPeter Zijlstra  * Per construction; when:
843a7956e2SPeter Zijlstra  *
85e32cf5dfSEric W. Biederman  *   (p->flags & PF_KTHREAD) && p->worker_private
863a7956e2SPeter Zijlstra  *
873a7956e2SPeter Zijlstra  * the task is both a kthread and struct kthread is persistent. However
883a7956e2SPeter Zijlstra  * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
893a7956e2SPeter Zijlstra  * begin_new_exec()).
903a7956e2SPeter Zijlstra  */
913a7956e2SPeter Zijlstra static inline struct kthread *__to_kthread(struct task_struct *p)
923a7956e2SPeter Zijlstra {
93e32cf5dfSEric W. Biederman 	void *kthread = p->worker_private;
943a7956e2SPeter Zijlstra 	if (kthread && !(p->flags & PF_KTHREAD))
953a7956e2SPeter Zijlstra 		kthread = NULL;
963a7956e2SPeter Zijlstra 	return kthread;
973a7956e2SPeter Zijlstra }
983a7956e2SPeter Zijlstra 
99d6986ce2SYafang Shao void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
100d6986ce2SYafang Shao {
101d6986ce2SYafang Shao 	struct kthread *kthread = to_kthread(tsk);
102d6986ce2SYafang Shao 
103d6986ce2SYafang Shao 	if (!kthread || !kthread->full_name) {
104d6986ce2SYafang Shao 		__get_task_comm(buf, buf_size, tsk);
105d6986ce2SYafang Shao 		return;
106d6986ce2SYafang Shao 	}
107d6986ce2SYafang Shao 
108d6986ce2SYafang Shao 	strscpy_pad(buf, kthread->full_name, buf_size);
109d6986ce2SYafang Shao }
110d6986ce2SYafang Shao 
11140966e31SEric W. Biederman bool set_kthread_struct(struct task_struct *p)
11200b89fe0SValentin Schneider {
11300b89fe0SValentin Schneider 	struct kthread *kthread;
11400b89fe0SValentin Schneider 
11540966e31SEric W. Biederman 	if (WARN_ON_ONCE(to_kthread(p)))
11640966e31SEric W. Biederman 		return false;
11700b89fe0SValentin Schneider 
11800b89fe0SValentin Schneider 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
11940966e31SEric W. Biederman 	if (!kthread)
12040966e31SEric W. Biederman 		return false;
12140966e31SEric W. Biederman 
12240966e31SEric W. Biederman 	init_completion(&kthread->exited);
12340966e31SEric W. Biederman 	init_completion(&kthread->parked);
12440966e31SEric W. Biederman 	p->vfork_done = &kthread->exited;
12540966e31SEric W. Biederman 
126e32cf5dfSEric W. Biederman 	p->worker_private = kthread;
12740966e31SEric W. Biederman 	return true;
12800b89fe0SValentin Schneider }
12900b89fe0SValentin Schneider 
1301da5c46fSOleg Nesterov void free_kthread_struct(struct task_struct *k)
1311da5c46fSOleg Nesterov {
13205e3db95SShaohua Li 	struct kthread *kthread;
13305e3db95SShaohua Li 
1341da5c46fSOleg Nesterov 	/*
13540966e31SEric W. Biederman 	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
1361da5c46fSOleg Nesterov 	 */
13705e3db95SShaohua Li 	kthread = to_kthread(k);
138d6986ce2SYafang Shao 	if (!kthread)
139d6986ce2SYafang Shao 		return;
140d6986ce2SYafang Shao 
1410b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP
142d6986ce2SYafang Shao 	WARN_ON_ONCE(kthread->blkcg_css);
14305e3db95SShaohua Li #endif
144e32cf5dfSEric W. Biederman 	k->worker_private = NULL;
145d6986ce2SYafang Shao 	kfree(kthread->full_name);
14605e3db95SShaohua Li 	kfree(kthread);
1471da5c46fSOleg Nesterov }
1481da5c46fSOleg Nesterov 
1499e37bd30SRandy Dunlap /**
1509e37bd30SRandy Dunlap  * kthread_should_stop - should this kthread return now?
1519e37bd30SRandy Dunlap  *
15272fd4a35SRobert P. J. Day  * When someone calls kthread_stop() on your kthread, it will be woken
1539e37bd30SRandy Dunlap  * and this will return true.  You should then return, and your return
1549e37bd30SRandy Dunlap  * value will be passed through to kthread_stop().
1559e37bd30SRandy Dunlap  */
1562a1d4460SThomas Gleixner bool kthread_should_stop(void)
1571da177e4SLinus Torvalds {
1582a1d4460SThomas Gleixner 	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
1591da177e4SLinus Torvalds }
1601da177e4SLinus Torvalds EXPORT_SYMBOL(kthread_should_stop);
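/*
 * Illustrative usage sketch (not part of this file): a typical thread
 * function polls kthread_should_stop() and returns when it becomes true;
 * the return value is passed back to kthread_stop().  The helpers
 * my_threadfn() and my_do_work() are hypothetical.
 *
 *	static int my_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */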
1611da177e4SLinus Torvalds 
162*be33db21SGreg Kroah-Hartman static bool __kthread_should_park(struct task_struct *k)
1630121805dSMatthias Kaehlcke {
1640121805dSMatthias Kaehlcke 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
1650121805dSMatthias Kaehlcke }
1660121805dSMatthias Kaehlcke 
16782805ab7STejun Heo /**
1682a1d4460SThomas Gleixner  * kthread_should_park - should this kthread park now?
1692a1d4460SThomas Gleixner  *
1702a1d4460SThomas Gleixner  * When someone calls kthread_park() on your kthread, it will be woken
1712a1d4460SThomas Gleixner  * and this will return true.  You should then do the necessary
1722a1d4460SThomas Gleixner  * cleanup and call kthread_parkme()
1732a1d4460SThomas Gleixner  *
1742a1d4460SThomas Gleixner  * Similar to kthread_should_stop(), but this keeps the thread alive
1752a1d4460SThomas Gleixner  * and in a park position. kthread_unpark() "restarts" the thread and
1762a1d4460SThomas Gleixner  * calls the thread function again.
1772a1d4460SThomas Gleixner  */
1782a1d4460SThomas Gleixner bool kthread_should_park(void)
1792a1d4460SThomas Gleixner {
1800121805dSMatthias Kaehlcke 	return __kthread_should_park(current);
1812a1d4460SThomas Gleixner }
18218896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_should_park);
1832a1d4460SThomas Gleixner 
184ef73d6a4SArve Hjønnevåg bool kthread_should_stop_or_park(void)
185ef73d6a4SArve Hjønnevåg {
186ef73d6a4SArve Hjønnevåg 	struct kthread *kthread = __to_kthread(current);
187ef73d6a4SArve Hjønnevåg 
188ef73d6a4SArve Hjønnevåg 	if (!kthread)
189ef73d6a4SArve Hjønnevåg 		return false;
190ef73d6a4SArve Hjønnevåg 
191ef73d6a4SArve Hjønnevåg 	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
192ef73d6a4SArve Hjønnevåg }
193ef73d6a4SArve Hjønnevåg 
1942a1d4460SThomas Gleixner /**
1958a32c441STejun Heo  * kthread_freezable_should_stop - should this freezable kthread return now?
1968a32c441STejun Heo  * @was_frozen: optional out parameter, indicates whether %current was frozen
1978a32c441STejun Heo  *
1988a32c441STejun Heo  * kthread_should_stop() for freezable kthreads, which will enter
1998a32c441STejun Heo  * refrigerator if necessary.  This function is safe from kthread_stop() /
2008a32c441STejun Heo  * freezer deadlock and freezable kthreads should use this function instead
2018a32c441STejun Heo  * of calling try_to_freeze() directly.
2028a32c441STejun Heo  */
2038a32c441STejun Heo bool kthread_freezable_should_stop(bool *was_frozen)
2048a32c441STejun Heo {
2058a32c441STejun Heo 	bool frozen = false;
2068a32c441STejun Heo 
2078a32c441STejun Heo 	might_sleep();
2088a32c441STejun Heo 
2098a32c441STejun Heo 	if (unlikely(freezing(current)))
2108a32c441STejun Heo 		frozen = __refrigerator(true);
2118a32c441STejun Heo 
2128a32c441STejun Heo 	if (was_frozen)
2138a32c441STejun Heo 		*was_frozen = frozen;
2148a32c441STejun Heo 
2158a32c441STejun Heo 	return kthread_should_stop();
2168a32c441STejun Heo }
2178a32c441STejun Heo EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
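/*
 * Illustrative sketch (not part of this file): a freezable kthread loop
 * that uses kthread_freezable_should_stop() as its combined exit check and
 * freeze point.  my_threadfn() and my_do_work() are hypothetical helpers.
 *
 *	static int my_threadfn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */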
2188a32c441STejun Heo 
2198a32c441STejun Heo /**
22052782c92SJ. Bruce Fields  * kthread_func - return the function specified on kthread creation
22152782c92SJ. Bruce Fields  * @task: kthread task in question
22252782c92SJ. Bruce Fields  *
22352782c92SJ. Bruce Fields  * Returns NULL if the task is not a kthread.
22452782c92SJ. Bruce Fields  */
22552782c92SJ. Bruce Fields void *kthread_func(struct task_struct *task)
22652782c92SJ. Bruce Fields {
2273a7956e2SPeter Zijlstra 	struct kthread *kthread = __to_kthread(task);
2283a7956e2SPeter Zijlstra 	if (kthread)
2293a7956e2SPeter Zijlstra 		return kthread->threadfn;
23052782c92SJ. Bruce Fields 	return NULL;
23152782c92SJ. Bruce Fields }
23252782c92SJ. Bruce Fields EXPORT_SYMBOL_GPL(kthread_func);
23352782c92SJ. Bruce Fields 
23452782c92SJ. Bruce Fields /**
23582805ab7STejun Heo  * kthread_data - return data value specified on kthread creation
23682805ab7STejun Heo  * @task: kthread task in question
23782805ab7STejun Heo  *
23882805ab7STejun Heo  * Return the data value specified when kthread @task was created.
23982805ab7STejun Heo  * The caller is responsible for ensuring the validity of @task when
24082805ab7STejun Heo  * calling this function.
24182805ab7STejun Heo  */
24282805ab7STejun Heo void *kthread_data(struct task_struct *task)
24382805ab7STejun Heo {
24482805ab7STejun Heo 	return to_kthread(task)->data;
24582805ab7STejun Heo }
24652782c92SJ. Bruce Fields EXPORT_SYMBOL_GPL(kthread_data);
24782805ab7STejun Heo 
248cd42d559STejun Heo /**
249e700591aSPetr Mladek  * kthread_probe_data - speculative version of kthread_data()
250cd42d559STejun Heo  * @task: possible kthread task in question
251cd42d559STejun Heo  *
252cd42d559STejun Heo  * @task could be a kthread task.  Return the data value specified when it
253cd42d559STejun Heo  * was created if accessible.  If @task isn't a kthread task or its data is
254cd42d559STejun Heo  * inaccessible for any reason, %NULL is returned.  This function requires
255cd42d559STejun Heo  * that @task itself is safe to dereference.
256cd42d559STejun Heo  */
257e700591aSPetr Mladek void *kthread_probe_data(struct task_struct *task)
258cd42d559STejun Heo {
2593a7956e2SPeter Zijlstra 	struct kthread *kthread = __to_kthread(task);
260cd42d559STejun Heo 	void *data = NULL;
261cd42d559STejun Heo 
2623a7956e2SPeter Zijlstra 	if (kthread)
263fe557319SChristoph Hellwig 		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
264cd42d559STejun Heo 	return data;
265cd42d559STejun Heo }
266cd42d559STejun Heo 
2672a1d4460SThomas Gleixner static void __kthread_parkme(struct kthread *self)
2682a1d4460SThomas Gleixner {
269741a76b3SPeter Zijlstra 	for (;;) {
2701cef1150SPeter Zijlstra 		/*
2711cef1150SPeter Zijlstra 		 * TASK_PARKED is a special state; we must serialize against
2721cef1150SPeter Zijlstra 		 * possible pending wakeups to avoid store-store collisions on
2731cef1150SPeter Zijlstra 		 * task->state.
2741cef1150SPeter Zijlstra 		 *
2751cef1150SPeter Zijlstra 		 * Such a collision might possibly result in the task state
2761cef1150SPeter Zijlstra 		 * changing from TASK_PARKED and us failing the
2771cef1150SPeter Zijlstra 		 * wait_task_inactive() in kthread_park().
2781cef1150SPeter Zijlstra 		 */
2791cef1150SPeter Zijlstra 		set_special_state(TASK_PARKED);
280741a76b3SPeter Zijlstra 		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
281741a76b3SPeter Zijlstra 			break;
2821cef1150SPeter Zijlstra 
28326c7295bSLiang Chen 		/*
28426c7295bSLiang Chen 		 * Thread is going to call schedule(), do not preempt it,
28526c7295bSLiang Chen 		 * or the caller of kthread_park() may spend more time in
28626c7295bSLiang Chen 		 * wait_task_inactive().
28726c7295bSLiang Chen 		 */
28826c7295bSLiang Chen 		preempt_disable();
289f83ee19bSPeter Zijlstra 		complete(&self->parked);
29026c7295bSLiang Chen 		schedule_preempt_disabled();
29126c7295bSLiang Chen 		preempt_enable();
2922a1d4460SThomas Gleixner 	}
2932a1d4460SThomas Gleixner 	__set_current_state(TASK_RUNNING);
2942a1d4460SThomas Gleixner }
2952a1d4460SThomas Gleixner 
2962a1d4460SThomas Gleixner void kthread_parkme(void)
2972a1d4460SThomas Gleixner {
2982a1d4460SThomas Gleixner 	__kthread_parkme(to_kthread(current));
2992a1d4460SThomas Gleixner }
30018896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_parkme);
3012a1d4460SThomas Gleixner 
302bbda86e9SEric W. Biederman /**
303bbda86e9SEric W. Biederman  * kthread_exit - Cause the current kthread return @result to kthread_stop().
304bbda86e9SEric W. Biederman  * @result: The integer value to return to kthread_stop().
305bbda86e9SEric W. Biederman  *
306bbda86e9SEric W. Biederman  * While kthread_exit can be called directly, it exists so that
307bbda86e9SEric W. Biederman  * functions which do some additional work in non-modular code such as
308bbda86e9SEric W. Biederman  * module_put_and_kthread_exit can be implemented.
309bbda86e9SEric W. Biederman  *
310bbda86e9SEric W. Biederman  * Does not return.
311bbda86e9SEric W. Biederman  */
312bbda86e9SEric W. Biederman void __noreturn kthread_exit(long result)
313bbda86e9SEric W. Biederman {
3146b124879SEric W. Biederman 	struct kthread *kthread = to_kthread(current);
3156b124879SEric W. Biederman 	kthread->result = result;
3166b124879SEric W. Biederman 	do_exit(0);
317bbda86e9SEric W. Biederman }
318bbda86e9SEric W. Biederman 
319cead1855SEric W. Biederman /**
3205eb6f228SEric W. Biederman  * kthread_complete_and_exit - Exit the current kthread.
321cead1855SEric W. Biederman  * @comp: Completion to complete
322cead1855SEric W. Biederman  * @code: The integer value to return to kthread_stop().
323cead1855SEric W. Biederman  *
3246a25212dSPrathu Baronia  * If present, complete @comp and then return code to kthread_stop().
325cead1855SEric W. Biederman  *
326cead1855SEric W. Biederman  * A kernel thread whose module may be removed after the completion of
3276a25212dSPrathu Baronia  * @comp can use this function to exit safely.
328cead1855SEric W. Biederman  *
329cead1855SEric W. Biederman  * Does not return.
330cead1855SEric W. Biederman  */
331cead1855SEric W. Biederman void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
332cead1855SEric W. Biederman {
333cead1855SEric W. Biederman 	if (comp)
334cead1855SEric W. Biederman 		complete(comp);
335cead1855SEric W. Biederman 
336cead1855SEric W. Biederman 	kthread_exit(code);
337cead1855SEric W. Biederman }
338cead1855SEric W. Biederman EXPORT_SYMBOL(kthread_complete_and_exit);
339cead1855SEric W. Biederman 
3401da177e4SLinus Torvalds static int kthread(void *_create)
3411da177e4SLinus Torvalds {
3421a7243caSSebastian Andrzej Siewior 	static const struct sched_param param = { .sched_priority = 0 };
34373c27992SEric W. Biederman 	/* Copy data: it's on kthread's stack */
34463706172SOleg Nesterov 	struct kthread_create_info *create = _create;
34563706172SOleg Nesterov 	int (*threadfn)(void *data) = create->threadfn;
34663706172SOleg Nesterov 	void *data = create->data;
347786235eeSTetsuo Handa 	struct completion *done;
3481da5c46fSOleg Nesterov 	struct kthread *self;
34963706172SOleg Nesterov 	int ret;
35063706172SOleg Nesterov 
35100b89fe0SValentin Schneider 	self = to_kthread(current);
3521da177e4SLinus Torvalds 
353d25c83c6SPetr Mladek 	/* Release the structure when the caller was killed by a fatal signal. */
354786235eeSTetsuo Handa 	done = xchg(&create->done, NULL);
355786235eeSTetsuo Handa 	if (!done) {
35673e0c116SMike Christie 		kfree(create->full_name);
357786235eeSTetsuo Handa 		kfree(create);
358bbda86e9SEric W. Biederman 		kthread_exit(-EINTR);
3591da5c46fSOleg Nesterov 	}
3601da5c46fSOleg Nesterov 
36173e0c116SMike Christie 	self->full_name = create->full_name;
36252782c92SJ. Bruce Fields 	self->threadfn = threadfn;
3631da5c46fSOleg Nesterov 	self->data = data;
3641da5c46fSOleg Nesterov 
3651a7243caSSebastian Andrzej Siewior 	/*
3661a7243caSSebastian Andrzej Siewior 	 * The new thread inherited kthreadd's priority and CPU mask. Reset
3671a7243caSSebastian Andrzej Siewior 	 * back to default in case they have been changed.
3681a7243caSSebastian Andrzej Siewior 	 */
3691a7243caSSebastian Andrzej Siewior 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
37004d4e665SFrederic Weisbecker 	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
3711a7243caSSebastian Andrzej Siewior 
3721da177e4SLinus Torvalds 	/* OK, tell user we're spawned, wait for stop or wakeup */
373a076e4bcSOleg Nesterov 	__set_current_state(TASK_UNINTERRUPTIBLE);
3743217ab97SVitaliy Gusev 	create->result = current;
37526c7295bSLiang Chen 	/*
37626c7295bSLiang Chen 	 * Thread is going to call schedule(), do not preempt it,
37726c7295bSLiang Chen 	 * or the creator may spend more time in wait_task_inactive().
37826c7295bSLiang Chen 	 */
37926c7295bSLiang Chen 	preempt_disable();
380786235eeSTetsuo Handa 	complete(done);
38126c7295bSLiang Chen 	schedule_preempt_disabled();
38226c7295bSLiang Chen 	preempt_enable();
3831da177e4SLinus Torvalds 
38463706172SOleg Nesterov 	ret = -EINTR;
3851da5c46fSOleg Nesterov 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
38677f88796STejun Heo 		cgroup_kthread_ready();
3871da5c46fSOleg Nesterov 		__kthread_parkme(self);
3882a1d4460SThomas Gleixner 		ret = threadfn(data);
3892a1d4460SThomas Gleixner 	}
390bbda86e9SEric W. Biederman 	kthread_exit(ret);
3911da177e4SLinus Torvalds }
3921da177e4SLinus Torvalds 
393cb5021caSYanfei Xu /* called from kernel_clone() to get node information for the task about to be created */
394207205a2SEric Dumazet int tsk_fork_get_node(struct task_struct *tsk)
395207205a2SEric Dumazet {
396207205a2SEric Dumazet #ifdef CONFIG_NUMA
397207205a2SEric Dumazet 	if (tsk == kthreadd_task)
398207205a2SEric Dumazet 		return tsk->pref_node_fork;
399207205a2SEric Dumazet #endif
40081c98869SNishanth Aravamudan 	return NUMA_NO_NODE;
401207205a2SEric Dumazet }
402207205a2SEric Dumazet 
40473c27992SEric W. Biederman static void create_kthread(struct kthread_create_info *create)
4041da177e4SLinus Torvalds {
4051da177e4SLinus Torvalds 	int pid;
4061da177e4SLinus Torvalds 
407207205a2SEric Dumazet #ifdef CONFIG_NUMA
408207205a2SEric Dumazet 	current->pref_node_fork = create->node;
409207205a2SEric Dumazet #endif
4101da177e4SLinus Torvalds 	/* We want our own signal handler (we take no signals by default). */
41173e0c116SMike Christie 	pid = kernel_thread(kthread, create, create->full_name,
412cf587db2SMike Christie 			    CLONE_FS | CLONE_FILES | SIGCHLD);
413cdd140bdSOleg Nesterov 	if (pid < 0) {
414d25c83c6SPetr Mladek 		/* Release the structure when the caller was killed by a fatal signal. */
415786235eeSTetsuo Handa 		struct completion *done = xchg(&create->done, NULL);
416786235eeSTetsuo Handa 
41773e0c116SMike Christie 		kfree(create->full_name);
418786235eeSTetsuo Handa 		if (!done) {
419786235eeSTetsuo Handa 			kfree(create);
420786235eeSTetsuo Handa 			return;
421786235eeSTetsuo Handa 		}
4221da177e4SLinus Torvalds 		create->result = ERR_PTR(pid);
423786235eeSTetsuo Handa 		complete(done);
4241da177e4SLinus Torvalds 	}
425cdd140bdSOleg Nesterov }
4261da177e4SLinus Torvalds 
427c0b942a7SNicolas Iooss static __printf(4, 0)
428c0b942a7SNicolas Iooss struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
4292a1d4460SThomas Gleixner 						    void *data, int node,
4301da177e4SLinus Torvalds 						    const char namefmt[],
431255451e4SPetr Mladek 						    va_list args)
4321da177e4SLinus Torvalds {
433786235eeSTetsuo Handa 	DECLARE_COMPLETION_ONSTACK(done);
434786235eeSTetsuo Handa 	struct task_struct *task;
435786235eeSTetsuo Handa 	struct kthread_create_info *create = kmalloc(sizeof(*create),
436786235eeSTetsuo Handa 						     GFP_KERNEL);
4371da177e4SLinus Torvalds 
438786235eeSTetsuo Handa 	if (!create)
439786235eeSTetsuo Handa 		return ERR_PTR(-ENOMEM);
440786235eeSTetsuo Handa 	create->threadfn = threadfn;
441786235eeSTetsuo Handa 	create->data = data;
442786235eeSTetsuo Handa 	create->node = node;
443786235eeSTetsuo Handa 	create->done = &done;
44473e0c116SMike Christie 	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
44573e0c116SMike Christie 	if (!create->full_name) {
44673e0c116SMike Christie 		task = ERR_PTR(-ENOMEM);
44773e0c116SMike Christie 		goto free_create;
44873e0c116SMike Christie 	}
4491da177e4SLinus Torvalds 
45073c27992SEric W. Biederman 	spin_lock(&kthread_create_lock);
451786235eeSTetsuo Handa 	list_add_tail(&create->list, &kthread_create_list);
45273c27992SEric W. Biederman 	spin_unlock(&kthread_create_lock);
45373c27992SEric W. Biederman 
454cbd9b67bSDmitry Adamushko 	wake_up_process(kthreadd_task);
455786235eeSTetsuo Handa 	/*
456786235eeSTetsuo Handa 	 * Wait for the completion in killable state, because I might be
457786235eeSTetsuo Handa 	 * chosen by the OOM killer while kthreadd is trying to allocate
458786235eeSTetsuo Handa 	 * memory for the new kernel thread.
459786235eeSTetsuo Handa 	 */
460786235eeSTetsuo Handa 	if (unlikely(wait_for_completion_killable(&done))) {
461786235eeSTetsuo Handa 		/*
462d25c83c6SPetr Mladek 		 * If I was killed by a fatal signal before kthreadd (or new
463d25c83c6SPetr Mladek 		 * kernel thread) calls complete(), leave the cleanup of this
464d25c83c6SPetr Mladek 		 * structure to that thread.
465786235eeSTetsuo Handa 		 */
466786235eeSTetsuo Handa 		if (xchg(&create->done, NULL))
4678fe6929cSTetsuo Handa 			return ERR_PTR(-EINTR);
468786235eeSTetsuo Handa 		/*
469786235eeSTetsuo Handa 		 * kthreadd (or new kernel thread) will call complete()
470786235eeSTetsuo Handa 		 * shortly.
471786235eeSTetsuo Handa 		 */
472786235eeSTetsuo Handa 		wait_for_completion(&done);
473786235eeSTetsuo Handa 	}
474786235eeSTetsuo Handa 	task = create->result;
47573e0c116SMike Christie free_create:
476786235eeSTetsuo Handa 	kfree(create);
477786235eeSTetsuo Handa 	return task;
4781da177e4SLinus Torvalds }
479255451e4SPetr Mladek 
480255451e4SPetr Mladek /**
481255451e4SPetr Mladek  * kthread_create_on_node - create a kthread.
482255451e4SPetr Mladek  * @threadfn: the function to run until signal_pending(current).
483255451e4SPetr Mladek  * @data: data ptr for @threadfn.
484255451e4SPetr Mladek  * @node: task and thread structures for the thread are allocated on this node
485255451e4SPetr Mladek  * @namefmt: printf-style name for the thread.
486255451e4SPetr Mladek  *
487255451e4SPetr Mladek  * Description: This helper function creates and names a kernel
488255451e4SPetr Mladek  * thread.  The thread will be stopped: use wake_up_process() to start
489255451e4SPetr Mladek  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
490255451e4SPetr Mladek  * is affine to all CPUs.
491255451e4SPetr Mladek  *
492255451e4SPetr Mladek  * If the thread is going to be bound to a particular cpu, give the cpu's
493255451e4SPetr Mladek  * node in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
494255451e4SPetr Mladek  * When woken, the thread will run @threadfn() with @data as its
495111e7049SEric W. Biederman  * argument. @threadfn() can either return directly if it is a
496255451e4SPetr Mladek  * standalone thread for which no one will call kthread_stop(), or
497255451e4SPetr Mladek  * return when 'kthread_should_stop()' is true (which means
498255451e4SPetr Mladek  * kthread_stop() has been called).  The return value should be zero
499255451e4SPetr Mladek  * or a negative error number; it will be passed to kthread_stop().
500255451e4SPetr Mladek  *
501255451e4SPetr Mladek  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
502255451e4SPetr Mladek  */
503255451e4SPetr Mladek struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
504255451e4SPetr Mladek 					   void *data, int node,
505255451e4SPetr Mladek 					   const char namefmt[],
506255451e4SPetr Mladek 					   ...)
507255451e4SPetr Mladek {
508255451e4SPetr Mladek 	struct task_struct *task;
509255451e4SPetr Mladek 	va_list args;
510255451e4SPetr Mladek 
511255451e4SPetr Mladek 	va_start(args, namefmt);
512255451e4SPetr Mladek 	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
513255451e4SPetr Mladek 	va_end(args);
514255451e4SPetr Mladek 
515255451e4SPetr Mladek 	return task;
516255451e4SPetr Mladek }
517207205a2SEric Dumazet EXPORT_SYMBOL(kthread_create_on_node);
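/*
 * Illustrative sketch (not part of this file): create a thread, start it
 * with wake_up_process(), and later stop it.  my_threadfn(), my_dev and
 * ret are hypothetical.
 *
 *	struct task_struct *tsk;
 *	int ret;
 *
 *	tsk = kthread_create_on_node(my_threadfn, my_dev, NUMA_NO_NODE,
 *				     "my_thread");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */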
5181da177e4SLinus Torvalds 
5192f064a59SPeter Zijlstra static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
5202a1d4460SThomas Gleixner {
52125834c73SPeter Zijlstra 	unsigned long flags;
52225834c73SPeter Zijlstra 
523f2530dc7SThomas Gleixner 	if (!wait_task_inactive(p, state)) {
524f2530dc7SThomas Gleixner 		WARN_ON(1);
525f2530dc7SThomas Gleixner 		return;
526f2530dc7SThomas Gleixner 	}
52725834c73SPeter Zijlstra 
5282a1d4460SThomas Gleixner 	/* It's safe because the task is inactive. */
52925834c73SPeter Zijlstra 	raw_spin_lock_irqsave(&p->pi_lock, flags);
53025834c73SPeter Zijlstra 	do_set_cpus_allowed(p, mask);
53114a40ffcSTejun Heo 	p->flags |= PF_NO_SETAFFINITY;
53225834c73SPeter Zijlstra 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
53325834c73SPeter Zijlstra }
53425834c73SPeter Zijlstra 
5352f064a59SPeter Zijlstra static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
53625834c73SPeter Zijlstra {
53725834c73SPeter Zijlstra 	__kthread_bind_mask(p, cpumask_of(cpu), state);
53825834c73SPeter Zijlstra }
53925834c73SPeter Zijlstra 
54025834c73SPeter Zijlstra void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
54125834c73SPeter Zijlstra {
54225834c73SPeter Zijlstra 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
5432a1d4460SThomas Gleixner }
5442a1d4460SThomas Gleixner 
5459e37bd30SRandy Dunlap /**
546881232b7SPeter Zijlstra  * kthread_bind - bind a just-created kthread to a cpu.
547881232b7SPeter Zijlstra  * @p: thread created by kthread_create().
548881232b7SPeter Zijlstra  * @cpu: cpu (might not be online, must be possible) for @k to run on.
549881232b7SPeter Zijlstra  *
550881232b7SPeter Zijlstra  * Description: This function is equivalent to set_cpus_allowed(),
551881232b7SPeter Zijlstra  * except that @cpu doesn't need to be online, and the thread must be
552881232b7SPeter Zijlstra  * stopped (i.e., just returned from kthread_create()).
553881232b7SPeter Zijlstra  */
554881232b7SPeter Zijlstra void kthread_bind(struct task_struct *p, unsigned int cpu)
555881232b7SPeter Zijlstra {
556f2530dc7SThomas Gleixner 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
557881232b7SPeter Zijlstra }
558881232b7SPeter Zijlstra EXPORT_SYMBOL(kthread_bind);
559881232b7SPeter Zijlstra 
560881232b7SPeter Zijlstra /**
5612a1d4460SThomas Gleixner  * kthread_create_on_cpu - Create a cpu bound kthread
5622a1d4460SThomas Gleixner  * @threadfn: the function to run until signal_pending(current).
5632a1d4460SThomas Gleixner  * @data: data ptr for @threadfn.
5642a1d4460SThomas Gleixner  * @cpu: The cpu on which the thread should be bound,
5652a1d4460SThomas Gleixner  * @namefmt: printf-style name for the thread. Format is restricted
5662a1d4460SThomas Gleixner  *	     to "name.*%u". Code fills in cpu number.
5672a1d4460SThomas Gleixner  *
5682a1d4460SThomas Gleixner  * Description: This helper function creates and names a kernel thread
5692a1d4460SThomas Gleixner  */
5702a1d4460SThomas Gleixner struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
5712a1d4460SThomas Gleixner 					  void *data, unsigned int cpu,
5722a1d4460SThomas Gleixner 					  const char *namefmt)
5732a1d4460SThomas Gleixner {
5742a1d4460SThomas Gleixner 	struct task_struct *p;
5752a1d4460SThomas Gleixner 
57610922838SNishanth Aravamudan 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
5772a1d4460SThomas Gleixner 				   cpu);
5782a1d4460SThomas Gleixner 	if (IS_ERR(p))
5792a1d4460SThomas Gleixner 		return p;
580a65d4096SPetr Mladek 	kthread_bind(p, cpu);
581a65d4096SPetr Mladek 	/* CPU hotplug need to bind once again when unparking the thread. */
5822a1d4460SThomas Gleixner 	to_kthread(p)->cpu = cpu;
5832a1d4460SThomas Gleixner 	return p;
5842a1d4460SThomas Gleixner }
585800977f6SCai Huoqing EXPORT_SYMBOL(kthread_create_on_cpu);
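/*
 * Illustrative sketch (not part of this file): per-CPU threads are often
 * managed through higher-level APIs (e.g. smpboot threads), but a direct
 * use looks like this; my_threadfn() and tsk are hypothetical.
 *
 *	tsk = kthread_create_on_cpu(my_threadfn, NULL, cpu, "my_cpu_thread/%u");
 *	if (!IS_ERR(tsk))
 *		wake_up_process(tsk);
 */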
5862a1d4460SThomas Gleixner 
587ac687e6eSPeter Zijlstra void kthread_set_per_cpu(struct task_struct *k, int cpu)
588ac687e6eSPeter Zijlstra {
589ac687e6eSPeter Zijlstra 	struct kthread *kthread = to_kthread(k);
590ac687e6eSPeter Zijlstra 	if (!kthread)
591ac687e6eSPeter Zijlstra 		return;
592ac687e6eSPeter Zijlstra 
593ac687e6eSPeter Zijlstra 	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
594ac687e6eSPeter Zijlstra 
595ac687e6eSPeter Zijlstra 	if (cpu < 0) {
596ac687e6eSPeter Zijlstra 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
597ac687e6eSPeter Zijlstra 		return;
598ac687e6eSPeter Zijlstra 	}
599ac687e6eSPeter Zijlstra 
600ac687e6eSPeter Zijlstra 	kthread->cpu = cpu;
601ac687e6eSPeter Zijlstra 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
602ac687e6eSPeter Zijlstra }
603ac687e6eSPeter Zijlstra 
6043a7956e2SPeter Zijlstra bool kthread_is_per_cpu(struct task_struct *p)
605ac687e6eSPeter Zijlstra {
6063a7956e2SPeter Zijlstra 	struct kthread *kthread = __to_kthread(p);
607ac687e6eSPeter Zijlstra 	if (!kthread)
608ac687e6eSPeter Zijlstra 		return false;
609ac687e6eSPeter Zijlstra 
610ac687e6eSPeter Zijlstra 	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
611ac687e6eSPeter Zijlstra }
612ac687e6eSPeter Zijlstra 
613cf380a4aSOleg Nesterov /**
614cf380a4aSOleg Nesterov  * kthread_unpark - unpark a thread created by kthread_create().
615cf380a4aSOleg Nesterov  * @k:		thread created by kthread_create().
616cf380a4aSOleg Nesterov  *
617cf380a4aSOleg Nesterov  * Sets kthread_should_park() for @k to return false, wakes it, and
618cf380a4aSOleg Nesterov  * waits for it to return. If the thread is marked percpu then it's
619cf380a4aSOleg Nesterov  * bound to the cpu again.
620cf380a4aSOleg Nesterov  */
621cf380a4aSOleg Nesterov void kthread_unpark(struct task_struct *k)
622f2530dc7SThomas Gleixner {
623cf380a4aSOleg Nesterov 	struct kthread *kthread = to_kthread(k);
624cf380a4aSOleg Nesterov 
625a65d4096SPetr Mladek 	/*
626a65d4096SPetr Mladek 	 * Newly created kthread was parked when the CPU was offline.
627a65d4096SPetr Mladek 	 * The binding was lost and we need to set it again.
628a65d4096SPetr Mladek 	 */
629f2530dc7SThomas Gleixner 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
630f2530dc7SThomas Gleixner 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
63185f1abe0SPeter Zijlstra 
63285f1abe0SPeter Zijlstra 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
6331cef1150SPeter Zijlstra 	/*
6341cef1150SPeter Zijlstra 	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
6351cef1150SPeter Zijlstra 	 */
636f2530dc7SThomas Gleixner 	wake_up_state(k, TASK_PARKED);
637f2530dc7SThomas Gleixner }
63818896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_unpark);
6392a1d4460SThomas Gleixner 
6402a1d4460SThomas Gleixner /**
6412a1d4460SThomas Gleixner  * kthread_park - park a thread created by kthread_create().
6422a1d4460SThomas Gleixner  * @k: thread created by kthread_create().
6432a1d4460SThomas Gleixner  *
6442a1d4460SThomas Gleixner  * Sets kthread_should_park() for @k to return true, wakes it, and
6452a1d4460SThomas Gleixner  * waits for it to return. This can also be called after kthread_create()
6462a1d4460SThomas Gleixner  * instead of calling wake_up_process(): the thread will park without
6472a1d4460SThomas Gleixner  * calling threadfn().
6482a1d4460SThomas Gleixner  *
6492a1d4460SThomas Gleixner  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
6502a1d4460SThomas Gleixner  * If called by the kthread itself just the park bit is set.
6512a1d4460SThomas Gleixner  */
6522a1d4460SThomas Gleixner int kthread_park(struct task_struct *k)
6532a1d4460SThomas Gleixner {
654cf380a4aSOleg Nesterov 	struct kthread *kthread = to_kthread(k);
6552a1d4460SThomas Gleixner 
656cf380a4aSOleg Nesterov 	if (WARN_ON(k->flags & PF_EXITING))
657cf380a4aSOleg Nesterov 		return -ENOSYS;
658cf380a4aSOleg Nesterov 
659f83ee19bSPeter Zijlstra 	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
660f83ee19bSPeter Zijlstra 		return -EBUSY;
661f83ee19bSPeter Zijlstra 
6622a1d4460SThomas Gleixner 	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
6632a1d4460SThomas Gleixner 	if (k != current) {
6642a1d4460SThomas Gleixner 		wake_up_process(k);
6651cef1150SPeter Zijlstra 		/*
6661cef1150SPeter Zijlstra 		 * Wait for __kthread_parkme() to complete(), this means we
6671cef1150SPeter Zijlstra 		 * _will_ have TASK_PARKED and are about to call schedule().
6681cef1150SPeter Zijlstra 		 */
6692a1d4460SThomas Gleixner 		wait_for_completion(&kthread->parked);
6701cef1150SPeter Zijlstra 		/*
6711cef1150SPeter Zijlstra 		 * Now wait for that schedule() to complete and the task to
6721cef1150SPeter Zijlstra 		 * get scheduled out.
6731cef1150SPeter Zijlstra 		 */
6741cef1150SPeter Zijlstra 		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
6752a1d4460SThomas Gleixner 	}
676cf380a4aSOleg Nesterov 
677cf380a4aSOleg Nesterov 	return 0;
6782a1d4460SThomas Gleixner }
67918896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_park);
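/*
 * Illustrative sketch (not part of this file): pausing and resuming a
 * thread around a reconfiguration step.  The thread's own loop must reach
 * kthread_parkme() via kthread_should_park() for the park to complete.
 * my_reconfigure() and tsk are hypothetical.
 *
 *	if (!kthread_park(tsk)) {
 *		my_reconfigure();
 *		kthread_unpark(tsk);
 *	}
 */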
6802a1d4460SThomas Gleixner 
6812a1d4460SThomas Gleixner /**
6829e37bd30SRandy Dunlap  * kthread_stop - stop a thread created by kthread_create().
6839e37bd30SRandy Dunlap  * @k: thread created by kthread_create().
6849e37bd30SRandy Dunlap  *
6859e37bd30SRandy Dunlap  * Sets kthread_should_stop() for @k to return true, wakes it, and
6869ae26027SOleg Nesterov  * waits for it to exit. This can also be called after kthread_create()
6879ae26027SOleg Nesterov  * instead of calling wake_up_process(): the thread will exit without
6889ae26027SOleg Nesterov  * calling threadfn().
6899ae26027SOleg Nesterov  *
690bbda86e9SEric W. Biederman  * If threadfn() may call kthread_exit() itself, the caller must ensure
6919ae26027SOleg Nesterov  * task_struct can't go away.
6929e37bd30SRandy Dunlap  *
6939e37bd30SRandy Dunlap  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
6949e37bd30SRandy Dunlap  * was never called.
6959e37bd30SRandy Dunlap  */
6961da177e4SLinus Torvalds int kthread_stop(struct task_struct *k)
6971da177e4SLinus Torvalds {
698b5c5442bSOleg Nesterov 	struct kthread *kthread;
6991da177e4SLinus Torvalds 	int ret;
7001da177e4SLinus Torvalds 
70163706172SOleg Nesterov 	trace_sched_kthread_stop(k);
702b5c5442bSOleg Nesterov 
703b5c5442bSOleg Nesterov 	get_task_struct(k);
704efb29fbfSOleg Nesterov 	kthread = to_kthread(k);
7052a1d4460SThomas Gleixner 	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
706cf380a4aSOleg Nesterov 	kthread_unpark(k);
707a7c01fa9SJason A. Donenfeld 	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
7081da177e4SLinus Torvalds 	wake_up_process(k);
70963706172SOleg Nesterov 	wait_for_completion(&kthread->exited);
7106b124879SEric W. Biederman 	ret = kthread->result;
7111da177e4SLinus Torvalds 	put_task_struct(k);
7120a16b607SMathieu Desnoyers 
713b5c5442bSOleg Nesterov 	trace_sched_kthread_stop_ret(ret);
7141da177e4SLinus Torvalds 	return ret;
7151da177e4SLinus Torvalds }
71652e92e57SAdrian Bunk EXPORT_SYMBOL(kthread_stop);
7171da177e4SLinus Torvalds 
718e804a4a4SSatyam Sharma int kthreadd(void *unused)
7191da177e4SLinus Torvalds {
72073c27992SEric W. Biederman 	struct task_struct *tsk = current;
72173c27992SEric W. Biederman 
722e804a4a4SSatyam Sharma 	/* Setup a clean context for our children to inherit. */
72373c27992SEric W. Biederman 	set_task_comm(tsk, "kthreadd");
72410ab825bSOleg Nesterov 	ignore_signals(tsk);
72504d4e665SFrederic Weisbecker 	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
726aee4faa4SLai Jiangshan 	set_mems_allowed(node_states[N_MEMORY]);
72773c27992SEric W. Biederman 
72834b087e4STejun Heo 	current->flags |= PF_NOFREEZE;
72977f88796STejun Heo 	cgroup_init_kthreadd();
73073c27992SEric W. Biederman 
73173c27992SEric W. Biederman 	for (;;) {
73273c27992SEric W. Biederman 		set_current_state(TASK_INTERRUPTIBLE);
73373c27992SEric W. Biederman 		if (list_empty(&kthread_create_list))
73473c27992SEric W. Biederman 			schedule();
73573c27992SEric W. Biederman 		__set_current_state(TASK_RUNNING);
73673c27992SEric W. Biederman 
73773c27992SEric W. Biederman 		spin_lock(&kthread_create_lock);
73873c27992SEric W. Biederman 		while (!list_empty(&kthread_create_list)) {
73973c27992SEric W. Biederman 			struct kthread_create_info *create;
74073c27992SEric W. Biederman 
74173c27992SEric W. Biederman 			create = list_entry(kthread_create_list.next,
74273c27992SEric W. Biederman 					    struct kthread_create_info, list);
74373c27992SEric W. Biederman 			list_del_init(&create->list);
74473c27992SEric W. Biederman 			spin_unlock(&kthread_create_lock);
74573c27992SEric W. Biederman 
74673c27992SEric W. Biederman 			create_kthread(create);
74773c27992SEric W. Biederman 
74873c27992SEric W. Biederman 			spin_lock(&kthread_create_lock);
74973c27992SEric W. Biederman 		}
75073c27992SEric W. Biederman 		spin_unlock(&kthread_create_lock);
75173c27992SEric W. Biederman 	}
7521da177e4SLinus Torvalds 
7531da177e4SLinus Torvalds 	return 0;
7541da177e4SLinus Torvalds }
755b56c0d89STejun Heo 
7563989144fSPetr Mladek void __kthread_init_worker(struct kthread_worker *worker,
7574f32e9b1SYong Zhang 				const char *name,
7584f32e9b1SYong Zhang 				struct lock_class_key *key)
7594f32e9b1SYong Zhang {
760dbf52682SPetr Mladek 	memset(worker, 0, sizeof(struct kthread_worker));
761fe99a4f4SJulia Cartwright 	raw_spin_lock_init(&worker->lock);
7624f32e9b1SYong Zhang 	lockdep_set_class_and_name(&worker->lock, key, name);
7634f32e9b1SYong Zhang 	INIT_LIST_HEAD(&worker->work_list);
76422597dc3SPetr Mladek 	INIT_LIST_HEAD(&worker->delayed_work_list);
7654f32e9b1SYong Zhang }
7663989144fSPetr Mladek EXPORT_SYMBOL_GPL(__kthread_init_worker);
7674f32e9b1SYong Zhang 
768b56c0d89STejun Heo /**
769b56c0d89STejun Heo  * kthread_worker_fn - kthread function to process kthread_worker
770b56c0d89STejun Heo  * @worker_ptr: pointer to initialized kthread_worker
771b56c0d89STejun Heo  *
772fbae2d44SPetr Mladek  * This function implements the main cycle of kthread worker. It processes
773fbae2d44SPetr Mladek  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
774fbae2d44SPetr Mladek  * is empty.
775b56c0d89STejun Heo  *
776fbae2d44SPetr Mladek  * Work items must not hold any locks or leave preemption or interrupts
777fbae2d44SPetr Mladek  * disabled when they finish. A safe freezing point is provided after one
778fbae2d44SPetr Mladek  * work item finishes and before the next one is started.
7798197b3d4SPetr Mladek  *
7808197b3d4SPetr Mladek  * Also the works must not be handled by more than one worker at the same time,
7818197b3d4SPetr Mladek  * see also kthread_queue_work().
782b56c0d89STejun Heo  */
783b56c0d89STejun Heo int kthread_worker_fn(void *worker_ptr)
784b56c0d89STejun Heo {
785b56c0d89STejun Heo 	struct kthread_worker *worker = worker_ptr;
786b56c0d89STejun Heo 	struct kthread_work *work;
787b56c0d89STejun Heo 
788fbae2d44SPetr Mladek 	/*
789fbae2d44SPetr Mladek 	 * FIXME: Update the check and remove the assignment when all kthread
790fbae2d44SPetr Mladek 	 * worker users are created using kthread_create_worker*() functions.
791fbae2d44SPetr Mladek 	 */
792fbae2d44SPetr Mladek 	WARN_ON(worker->task && worker->task != current);
793b56c0d89STejun Heo 	worker->task = current;
794dbf52682SPetr Mladek 
795dbf52682SPetr Mladek 	if (worker->flags & KTW_FREEZABLE)
796dbf52682SPetr Mladek 		set_freezable();
797dbf52682SPetr Mladek 
798b56c0d89STejun Heo repeat:
799b56c0d89STejun Heo 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
800b56c0d89STejun Heo 
801b56c0d89STejun Heo 	if (kthread_should_stop()) {
802b56c0d89STejun Heo 		__set_current_state(TASK_RUNNING);
803fe99a4f4SJulia Cartwright 		raw_spin_lock_irq(&worker->lock);
804b56c0d89STejun Heo 		worker->task = NULL;
805fe99a4f4SJulia Cartwright 		raw_spin_unlock_irq(&worker->lock);
806b56c0d89STejun Heo 		return 0;
807b56c0d89STejun Heo 	}
808b56c0d89STejun Heo 
809b56c0d89STejun Heo 	work = NULL;
810fe99a4f4SJulia Cartwright 	raw_spin_lock_irq(&worker->lock);
811b56c0d89STejun Heo 	if (!list_empty(&worker->work_list)) {
812b56c0d89STejun Heo 		work = list_first_entry(&worker->work_list,
813b56c0d89STejun Heo 					struct kthread_work, node);
814b56c0d89STejun Heo 		list_del_init(&work->node);
815b56c0d89STejun Heo 	}
81646f3d976STejun Heo 	worker->current_work = work;
817fe99a4f4SJulia Cartwright 	raw_spin_unlock_irq(&worker->lock);
818b56c0d89STejun Heo 
819b56c0d89STejun Heo 	if (work) {
820f630c7c6SRob Clark 		kthread_work_func_t func = work->func;
821b56c0d89STejun Heo 		__set_current_state(TASK_RUNNING);
822f630c7c6SRob Clark 		trace_sched_kthread_work_execute_start(work);
823b56c0d89STejun Heo 		work->func(work);
824f630c7c6SRob Clark 		/*
825f630c7c6SRob Clark 		 * Avoid dereferencing work after this point.  The trace
826f630c7c6SRob Clark 		 * event only cares about the address.
827f630c7c6SRob Clark 		 */
828f630c7c6SRob Clark 		trace_sched_kthread_work_execute_end(work, func);
829b56c0d89STejun Heo 	} else if (!freezing(current))
830b56c0d89STejun Heo 		schedule();
831b56c0d89STejun Heo 
832b56c0d89STejun Heo 	try_to_freeze();
83322cf8bc6SShaohua Li 	cond_resched();
834b56c0d89STejun Heo 	goto repeat;
835b56c0d89STejun Heo }
836b56c0d89STejun Heo EXPORT_SYMBOL_GPL(kthread_worker_fn);
837b56c0d89STejun Heo 
838c0b942a7SNicolas Iooss static __printf(3, 0) struct kthread_worker *
839dbf52682SPetr Mladek __kthread_create_worker(int cpu, unsigned int flags,
840dbf52682SPetr Mladek 			const char namefmt[], va_list args)
841fbae2d44SPetr Mladek {
842fbae2d44SPetr Mladek 	struct kthread_worker *worker;
843fbae2d44SPetr Mladek 	struct task_struct *task;
84498fa15f3SAnshuman Khandual 	int node = NUMA_NO_NODE;
845fbae2d44SPetr Mladek 
846fbae2d44SPetr Mladek 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
847fbae2d44SPetr Mladek 	if (!worker)
848fbae2d44SPetr Mladek 		return ERR_PTR(-ENOMEM);
849fbae2d44SPetr Mladek 
850fbae2d44SPetr Mladek 	kthread_init_worker(worker);
851fbae2d44SPetr Mladek 
8528fb9dcbdSOleg Nesterov 	if (cpu >= 0)
8538fb9dcbdSOleg Nesterov 		node = cpu_to_node(cpu);
854fbae2d44SPetr Mladek 
855fbae2d44SPetr Mladek 	task = __kthread_create_on_node(kthread_worker_fn, worker,
8568fb9dcbdSOleg Nesterov 						node, namefmt, args);
857fbae2d44SPetr Mladek 	if (IS_ERR(task))
858fbae2d44SPetr Mladek 		goto fail_task;
859fbae2d44SPetr Mladek 
8608fb9dcbdSOleg Nesterov 	if (cpu >= 0)
8618fb9dcbdSOleg Nesterov 		kthread_bind(task, cpu);
8628fb9dcbdSOleg Nesterov 
863dbf52682SPetr Mladek 	worker->flags = flags;
864fbae2d44SPetr Mladek 	worker->task = task;
865fbae2d44SPetr Mladek 	wake_up_process(task);
866fbae2d44SPetr Mladek 	return worker;
867fbae2d44SPetr Mladek 
868fbae2d44SPetr Mladek fail_task:
869fbae2d44SPetr Mladek 	kfree(worker);
870fbae2d44SPetr Mladek 	return ERR_CAST(task);
871fbae2d44SPetr Mladek }
872fbae2d44SPetr Mladek 
873fbae2d44SPetr Mladek /**
874fbae2d44SPetr Mladek  * kthread_create_worker - create a kthread worker
875dbf52682SPetr Mladek  * @flags: flags modifying the default behavior of the worker
876fbae2d44SPetr Mladek  * @namefmt: printf-style name for the kthread worker (task).
877fbae2d44SPetr Mladek  *
878fbae2d44SPetr Mladek  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
879fbae2d44SPetr Mladek  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
880d25c83c6SPetr Mladek  * when the caller was killed by a fatal signal.
881fbae2d44SPetr Mladek  */
882fbae2d44SPetr Mladek struct kthread_worker *
883dbf52682SPetr Mladek kthread_create_worker(unsigned int flags, const char namefmt[], ...)
884fbae2d44SPetr Mladek {
885fbae2d44SPetr Mladek 	struct kthread_worker *worker;
886fbae2d44SPetr Mladek 	va_list args;
887fbae2d44SPetr Mladek 
888fbae2d44SPetr Mladek 	va_start(args, namefmt);
889dbf52682SPetr Mladek 	worker = __kthread_create_worker(-1, flags, namefmt, args);
890fbae2d44SPetr Mladek 	va_end(args);
891fbae2d44SPetr Mladek 
892fbae2d44SPetr Mladek 	return worker;
893fbae2d44SPetr Mladek }
894fbae2d44SPetr Mladek EXPORT_SYMBOL(kthread_create_worker);
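/*
 * Illustrative sketch (not part of this file): create a worker, queue a
 * work item on it, and tear it down.  my_work and my_work_fn() are
 * hypothetical; kthread_destroy_worker() flushes the worker and stops its
 * task.
 *
 *	struct kthread_worker *worker;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(worker, &my_work);
 *	...
 *	kthread_destroy_worker(worker);
 */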
895fbae2d44SPetr Mladek 
896fbae2d44SPetr Mladek /**
897fbae2d44SPetr Mladek  * kthread_create_worker_on_cpu - create a kthread worker and bind it
8987b7b8a2cSRandy Dunlap  *	to a given CPU and the associated NUMA node.
899fbae2d44SPetr Mladek  * @cpu: CPU number
900dbf52682SPetr Mladek  * @flags: flags modifying the default behavior of the worker
901fbae2d44SPetr Mladek  * @namefmt: printf-style name for the kthread worker (task).
902fbae2d44SPetr Mladek  *
903fbae2d44SPetr Mladek  * Use a valid CPU number if you want to bind the kthread worker
904fbae2d44SPetr Mladek  * to the given CPU and the associated NUMA node.
905fbae2d44SPetr Mladek  *
906fbae2d44SPetr Mladek  * A good practice is to also include the cpu number in the worker name.
907fbae2d44SPetr Mladek  * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
908fbae2d44SPetr Mladek  *
909ebb2bdceSPetr Mladek  * CPU hotplug:
910ebb2bdceSPetr Mladek  * The kthread worker API is simple and generic. It just provides a way
911ebb2bdceSPetr Mladek  * to create, use, and destroy workers.
912ebb2bdceSPetr Mladek  *
913ebb2bdceSPetr Mladek  * It is up to the API user how to handle CPU hotplug. They have to decide
914ebb2bdceSPetr Mladek  * how to handle pending work items, prevent queuing new ones, and
915ebb2bdceSPetr Mladek  * restore the functionality when the CPU goes off and on. There are a
916ebb2bdceSPetr Mladek  * few catches:
917ebb2bdceSPetr Mladek  *
918ebb2bdceSPetr Mladek  *    - CPU affinity gets lost when it is scheduled on an offline CPU.
919ebb2bdceSPetr Mladek  *
920ebb2bdceSPetr Mladek  *    - The worker might not exist when the CPU was off when the user
921ebb2bdceSPetr Mladek  *      created the workers.
922ebb2bdceSPetr Mladek  *
923ebb2bdceSPetr Mladek  * Good practice is to implement two CPU hotplug callbacks and to
924ebb2bdceSPetr Mladek  * destroy/create the worker when the CPU goes down/up.
925ebb2bdceSPetr Mladek  *
926ebb2bdceSPetr Mladek  * Return:
927ebb2bdceSPetr Mladek  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
928fbae2d44SPetr Mladek  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
929d25c83c6SPetr Mladek  * when the caller was killed by a fatal signal.
930fbae2d44SPetr Mladek  */
931fbae2d44SPetr Mladek struct kthread_worker *
932dbf52682SPetr Mladek kthread_create_worker_on_cpu(int cpu, unsigned int flags,
933dbf52682SPetr Mladek 			     const char namefmt[], ...)
934fbae2d44SPetr Mladek {
935fbae2d44SPetr Mladek 	struct kthread_worker *worker;
936fbae2d44SPetr Mladek 	va_list args;
937fbae2d44SPetr Mladek 
938fbae2d44SPetr Mladek 	va_start(args, namefmt);
939dbf52682SPetr Mladek 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
940fbae2d44SPetr Mladek 	va_end(args);
941fbae2d44SPetr Mladek 
942fbae2d44SPetr Mladek 	return worker;
943fbae2d44SPetr Mladek }
944fbae2d44SPetr Mladek EXPORT_SYMBOL(kthread_create_worker_on_cpu);
945fbae2d44SPetr Mladek 
94637be45d4SPetr Mladek /*
94737be45d4SPetr Mladek  * Returns true when the work could not be queued at the moment.
94837be45d4SPetr Mladek  * It happens when it is already pending in a worker list
94937be45d4SPetr Mladek  * or when it is being cancelled.
95037be45d4SPetr Mladek  */
95137be45d4SPetr Mladek static inline bool queuing_blocked(struct kthread_worker *worker,
95237be45d4SPetr Mladek 				   struct kthread_work *work)
95337be45d4SPetr Mladek {
95437be45d4SPetr Mladek 	lockdep_assert_held(&worker->lock);
95537be45d4SPetr Mladek 
95637be45d4SPetr Mladek 	return !list_empty(&work->node) || work->canceling;
95737be45d4SPetr Mladek }
95837be45d4SPetr Mladek 
9598197b3d4SPetr Mladek static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
9608197b3d4SPetr Mladek 					     struct kthread_work *work)
9618197b3d4SPetr Mladek {
9628197b3d4SPetr Mladek 	lockdep_assert_held(&worker->lock);
9638197b3d4SPetr Mladek 	WARN_ON_ONCE(!list_empty(&work->node));
9648197b3d4SPetr Mladek 	/* Do not use a work with >1 worker, see kthread_queue_work() */
9658197b3d4SPetr Mladek 	WARN_ON_ONCE(work->worker && work->worker != worker);
9668197b3d4SPetr Mladek }
9678197b3d4SPetr Mladek 
9689a2e03d8STejun Heo /* insert @work before @pos in @worker */
9693989144fSPetr Mladek static void kthread_insert_work(struct kthread_worker *worker,
9709a2e03d8STejun Heo 				struct kthread_work *work,
9719a2e03d8STejun Heo 				struct list_head *pos)
9729a2e03d8STejun Heo {
9738197b3d4SPetr Mladek 	kthread_insert_work_sanity_check(worker, work);
9749a2e03d8STejun Heo 
975f630c7c6SRob Clark 	trace_sched_kthread_work_queue_work(worker, work);
976f630c7c6SRob Clark 
9779a2e03d8STejun Heo 	list_add_tail(&work->node, pos);
97846f3d976STejun Heo 	work->worker = worker;
979ed1403ecSLai Jiangshan 	if (!worker->current_work && likely(worker->task))
9809a2e03d8STejun Heo 		wake_up_process(worker->task);
9819a2e03d8STejun Heo }
9829a2e03d8STejun Heo 
983b56c0d89STejun Heo /**
9843989144fSPetr Mladek  * kthread_queue_work - queue a kthread_work
985b56c0d89STejun Heo  * @worker: target kthread_worker
986b56c0d89STejun Heo  * @work: kthread_work to queue
987b56c0d89STejun Heo  *
988b56c0d89STejun Heo  * Queue @work for asynchronous execution by @worker, which must have
989b56c0d89STejun Heo  * been created with kthread_create_worker().  Returns %true if @work
990b56c0d89STejun Heo  * was successfully queued, %false if it was already pending.
9918197b3d4SPetr Mladek  *
9928197b3d4SPetr Mladek  * Reinitialize the work if it needs to be used by another worker.
9938197b3d4SPetr Mladek  * For example, when the worker was stopped and started again.
994b56c0d89STejun Heo  */
9953989144fSPetr Mladek bool kthread_queue_work(struct kthread_worker *worker,
996b56c0d89STejun Heo 			struct kthread_work *work)
997b56c0d89STejun Heo {
998b56c0d89STejun Heo 	bool ret = false;
999b56c0d89STejun Heo 	unsigned long flags;
1000b56c0d89STejun Heo 
1001fe99a4f4SJulia Cartwright 	raw_spin_lock_irqsave(&worker->lock, flags);
100237be45d4SPetr Mladek 	if (!queuing_blocked(worker, work)) {
10033989144fSPetr Mladek 		kthread_insert_work(worker, work, &worker->work_list);
1004b56c0d89STejun Heo 		ret = true;
1005b56c0d89STejun Heo 	}
1006fe99a4f4SJulia Cartwright 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1007b56c0d89STejun Heo 	return ret;
1008b56c0d89STejun Heo }
10093989144fSPetr Mladek EXPORT_SYMBOL_GPL(kthread_queue_work);
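/*
 * Illustrative sketch (not part of this file): a work handler typically
 * recovers its containing object with container_of().  struct my_data,
 * my_work_fn() and d are hypothetical.
 *
 *	struct my_data {
 *		struct kthread_work work;
 *		int value;
 *	};
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_data *d = container_of(work, struct my_data, work);
 *
 *		pr_info("value=%d\n", d->value);
 *	}
 *
 *	kthread_init_work(&d->work, my_work_fn);
 *	kthread_queue_work(worker, &d->work);
 */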
1010b56c0d89STejun Heo 
101122597dc3SPetr Mladek /**
101222597dc3SPetr Mladek  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
101322597dc3SPetr Mladek  *	delayed work when the timer expires.
1014fe5c3b69SKees Cook  * @t: pointer to the expired timer
101522597dc3SPetr Mladek  *
101622597dc3SPetr Mladek  * The format of the function is defined by struct timer_list.
101722597dc3SPetr Mladek  * It should have been called from irqsafe timer with irq already off.
101822597dc3SPetr Mladek  */
1019fe5c3b69SKees Cook void kthread_delayed_work_timer_fn(struct timer_list *t)
102022597dc3SPetr Mladek {
1021fe5c3b69SKees Cook 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
102222597dc3SPetr Mladek 	struct kthread_work *work = &dwork->work;
102322597dc3SPetr Mladek 	struct kthread_worker *worker = work->worker;
1024ad01423aSSebastian Andrzej Siewior 	unsigned long flags;
102522597dc3SPetr Mladek 
102622597dc3SPetr Mladek 	/*
102722597dc3SPetr Mladek 	 * This might happen when a pending work is reinitialized.
102822597dc3SPetr Mladek 	 * It means that the work is being used in a wrong way.
102922597dc3SPetr Mladek 	 */
103022597dc3SPetr Mladek 	if (WARN_ON_ONCE(!worker))
103122597dc3SPetr Mladek 		return;
103222597dc3SPetr Mladek 
1033ad01423aSSebastian Andrzej Siewior 	raw_spin_lock_irqsave(&worker->lock, flags);
103422597dc3SPetr Mladek 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
103522597dc3SPetr Mladek 	WARN_ON_ONCE(work->worker != worker);
103622597dc3SPetr Mladek 
103722597dc3SPetr Mladek 	/* Move the work from worker->delayed_work_list. */
103822597dc3SPetr Mladek 	WARN_ON_ONCE(list_empty(&work->node));
103922597dc3SPetr Mladek 	list_del_init(&work->node);
10406993d0fdSZqiang 	if (!work->canceling)
104122597dc3SPetr Mladek 		kthread_insert_work(worker, work, &worker->work_list);
104222597dc3SPetr Mladek 
1043ad01423aSSebastian Andrzej Siewior 	raw_spin_unlock_irqrestore(&worker->lock, flags);
104422597dc3SPetr Mladek }
104522597dc3SPetr Mladek EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
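
/*
 * Illustrative sketch, not part of this file: the timer callback above is
 * never called directly by users.  kthread_init_delayed_work() (or the
 * static DEFINE_KTHREAD_DELAYED_WORK() initializer) installs it as the
 * irqsafe timer function of the delayed work, which is what the
 * WARN_ON_ONCE() on timer->function below relies on.  The names
 * example_noop_fn and example_dwork_init are hypothetical.
 */
#include <linux/kthread.h>

static void example_noop_fn(struct kthread_work *work)
{
}

static void example_dwork_init(struct kthread_delayed_work *dwork)
{
	/* Sets up dwork->work and the TIMER_IRQSAFE timer with the callback above. */
	kthread_init_delayed_work(dwork, example_noop_fn);
}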
104622597dc3SPetr Mladek 
1047bc88f85cSBen Dooks static void __kthread_queue_delayed_work(struct kthread_worker *worker,
104822597dc3SPetr Mladek 					 struct kthread_delayed_work *dwork,
104922597dc3SPetr Mladek 					 unsigned long delay)
105022597dc3SPetr Mladek {
105122597dc3SPetr Mladek 	struct timer_list *timer = &dwork->timer;
105222597dc3SPetr Mladek 	struct kthread_work *work = &dwork->work;
105322597dc3SPetr Mladek 
10544b243563SSami Tolvanen 	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
105522597dc3SPetr Mladek 
105622597dc3SPetr Mladek 	/*
105722597dc3SPetr Mladek 	 * If @delay is 0, queue @dwork->work immediately.  This is for
105822597dc3SPetr Mladek 	 * both optimization and correctness.  The earliest @timer can
105922597dc3SPetr Mladek 	 * expire is on the closest next tick and delayed_work users depend
106022597dc3SPetr Mladek 	 * on that there's no such delay when @delay is 0.
106122597dc3SPetr Mladek 	 */
106222597dc3SPetr Mladek 	if (!delay) {
106322597dc3SPetr Mladek 		kthread_insert_work(worker, work, &worker->work_list);
106422597dc3SPetr Mladek 		return;
106522597dc3SPetr Mladek 	}
106622597dc3SPetr Mladek 
106722597dc3SPetr Mladek 	/* Be paranoid and try to detect possible races already now. */
106822597dc3SPetr Mladek 	kthread_insert_work_sanity_check(worker, work);
106922597dc3SPetr Mladek 
107022597dc3SPetr Mladek 	list_add(&work->node, &worker->delayed_work_list);
107122597dc3SPetr Mladek 	work->worker = worker;
107222597dc3SPetr Mladek 	timer->expires = jiffies + delay;
107322597dc3SPetr Mladek 	add_timer(timer);
107422597dc3SPetr Mladek }
107522597dc3SPetr Mladek 
107622597dc3SPetr Mladek /**
107722597dc3SPetr Mladek  * kthread_queue_delayed_work - queue the associated kthread work
107822597dc3SPetr Mladek  *	after a delay.
107922597dc3SPetr Mladek  * @worker: target kthread_worker
108022597dc3SPetr Mladek  * @dwork: kthread_delayed_work to queue
108122597dc3SPetr Mladek  * @delay: number of jiffies to wait before queuing
108222597dc3SPetr Mladek  *
108322597dc3SPetr Mladek  * If the work is not already pending, this starts a timer that will queue
108422597dc3SPetr Mladek  * the work after the given @delay. If @delay is zero, the work is queued
108522597dc3SPetr Mladek  * immediately.
108622597dc3SPetr Mladek  *
108722597dc3SPetr Mladek  * Return: %false if the @work was already pending, which means that
108822597dc3SPetr Mladek  * either its timer was running or the work was already queued. It returns
108922597dc3SPetr Mladek  * %true otherwise.
109022597dc3SPetr Mladek  */
109122597dc3SPetr Mladek bool kthread_queue_delayed_work(struct kthread_worker *worker,
109222597dc3SPetr Mladek 				struct kthread_delayed_work *dwork,
109322597dc3SPetr Mladek 				unsigned long delay)
109422597dc3SPetr Mladek {
109522597dc3SPetr Mladek 	struct kthread_work *work = &dwork->work;
109622597dc3SPetr Mladek 	unsigned long flags;
109722597dc3SPetr Mladek 	bool ret = false;
109822597dc3SPetr Mladek 
1099fe99a4f4SJulia Cartwright 	raw_spin_lock_irqsave(&worker->lock, flags);
110022597dc3SPetr Mladek 
110137be45d4SPetr Mladek 	if (!queuing_blocked(worker, work)) {
110222597dc3SPetr Mladek 		__kthread_queue_delayed_work(worker, dwork, delay);
110322597dc3SPetr Mladek 		ret = true;
110422597dc3SPetr Mladek 	}
110522597dc3SPetr Mladek 
1106fe99a4f4SJulia Cartwright 	raw_spin_unlock_irqrestore(&worker->lock, flags);
110722597dc3SPetr Mladek 	return ret;
110822597dc3SPetr Mladek }
110922597dc3SPetr Mladek EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
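
/*
 * Illustrative usage sketch, not part of this file: queueing a delayed work
 * item so that it runs roughly 100ms later on an already created worker.
 * example_dwork_fn, example_dwork and example_queue_delayed are hypothetical
 * names.
 */
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static void example_dwork_fn(struct kthread_work *work)
{
	pr_info("delayed kthread_work expired\n");
}

static DEFINE_KTHREAD_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_queue_delayed(struct kthread_worker *worker)
{
	/* %false means the timer was already running or the work was queued. */
	if (!kthread_queue_delayed_work(worker, &example_dwork,
					msecs_to_jiffies(100)))
		pr_info("example_dwork was already pending\n");
}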
111022597dc3SPetr Mladek 
11119a2e03d8STejun Heo struct kthread_flush_work {
11129a2e03d8STejun Heo 	struct kthread_work	work;
11139a2e03d8STejun Heo 	struct completion	done;
11149a2e03d8STejun Heo };
11159a2e03d8STejun Heo 
11169a2e03d8STejun Heo static void kthread_flush_work_fn(struct kthread_work *work)
11179a2e03d8STejun Heo {
11189a2e03d8STejun Heo 	struct kthread_flush_work *fwork =
11199a2e03d8STejun Heo 		container_of(work, struct kthread_flush_work, work);
11209a2e03d8STejun Heo 	complete(&fwork->done);
11219a2e03d8STejun Heo }
11229a2e03d8STejun Heo 
1123b56c0d89STejun Heo /**
11243989144fSPetr Mladek  * kthread_flush_work - flush a kthread_work
1125b56c0d89STejun Heo  * @work: work to flush
1126b56c0d89STejun Heo  *
1127b56c0d89STejun Heo  * If @work is queued or executing, wait for it to finish execution.
1128b56c0d89STejun Heo  */
11293989144fSPetr Mladek void kthread_flush_work(struct kthread_work *work)
1130b56c0d89STejun Heo {
113146f3d976STejun Heo 	struct kthread_flush_work fwork = {
113246f3d976STejun Heo 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
113346f3d976STejun Heo 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
113446f3d976STejun Heo 	};
113546f3d976STejun Heo 	struct kthread_worker *worker;
113646f3d976STejun Heo 	bool noop = false;
1137b56c0d89STejun Heo 
113846f3d976STejun Heo 	worker = work->worker;
113946f3d976STejun Heo 	if (!worker)
114046f3d976STejun Heo 		return;
1141b56c0d89STejun Heo 
1142fe99a4f4SJulia Cartwright 	raw_spin_lock_irq(&worker->lock);
11438197b3d4SPetr Mladek 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
11448197b3d4SPetr Mladek 	WARN_ON_ONCE(work->worker != worker);
1145b56c0d89STejun Heo 
114646f3d976STejun Heo 	if (!list_empty(&work->node))
11473989144fSPetr Mladek 		kthread_insert_work(worker, &fwork.work, work->node.next);
114846f3d976STejun Heo 	else if (worker->current_work == work)
11493989144fSPetr Mladek 		kthread_insert_work(worker, &fwork.work,
11503989144fSPetr Mladek 				    worker->work_list.next);
115146f3d976STejun Heo 	else
115246f3d976STejun Heo 		noop = true;
1153b56c0d89STejun Heo 
1154fe99a4f4SJulia Cartwright 	raw_spin_unlock_irq(&worker->lock);
115546f3d976STejun Heo 
115646f3d976STejun Heo 	if (!noop)
115746f3d976STejun Heo 		wait_for_completion(&fwork.done);
1158b56c0d89STejun Heo }
11593989144fSPetr Mladek EXPORT_SYMBOL_GPL(kthread_flush_work);
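
/*
 * Illustrative sketch, not part of this file: waiting for a previously queued
 * work item to finish before tearing down the data its handler uses.  The
 * struct example_ctx and example_ctx_teardown() are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/slab.h>

struct example_ctx {
	struct kthread_work work;
	void *buffer;
};

static void example_ctx_teardown(struct example_ctx *ctx)
{
	/* After this returns, ctx->work is neither pending nor running. */
	kthread_flush_work(&ctx->work);
	kfree(ctx->buffer);
	kfree(ctx);
}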
1160b56c0d89STejun Heo 
116137be45d4SPetr Mladek /*
116234b3d534SPetr Mladek  * Make sure that the timer is neither set nor running, so that it can
116334b3d534SPetr Mladek  * no longer manipulate the work's list_head.
116437be45d4SPetr Mladek  *
116534b3d534SPetr Mladek  * The function is called under worker->lock. The lock is temporarily
116634b3d534SPetr Mladek  * released but the timer can't be set again in the meantime.
116737be45d4SPetr Mladek  */
116834b3d534SPetr Mladek static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
116937be45d4SPetr Mladek 					      unsigned long *flags)
117037be45d4SPetr Mladek {
117137be45d4SPetr Mladek 	struct kthread_delayed_work *dwork =
117237be45d4SPetr Mladek 		container_of(work, struct kthread_delayed_work, work);
117337be45d4SPetr Mladek 	struct kthread_worker *worker = work->worker;
117437be45d4SPetr Mladek 
117537be45d4SPetr Mladek 	/*
117637be45d4SPetr Mladek 	 * del_timer_sync() must be called to make sure that the timer
117737be45d4SPetr Mladek 	 * callback is not running. The lock must be temporarily released
117837be45d4SPetr Mladek 	 * to avoid a deadlock with the callback. In the meantime,
117937be45d4SPetr Mladek 	 * any queuing is blocked by setting the canceling counter.
118037be45d4SPetr Mladek 	 */
118137be45d4SPetr Mladek 	work->canceling++;
1182fe99a4f4SJulia Cartwright 	raw_spin_unlock_irqrestore(&worker->lock, *flags);
118337be45d4SPetr Mladek 	del_timer_sync(&dwork->timer);
1184fe99a4f4SJulia Cartwright 	raw_spin_lock_irqsave(&worker->lock, *flags);
118537be45d4SPetr Mladek 	work->canceling--;
118637be45d4SPetr Mladek }
118737be45d4SPetr Mladek 
118837be45d4SPetr Mladek /*
11895fa54346SPetr Mladek  * This function removes the work from the worker queue.
11905fa54346SPetr Mladek  *
11915fa54346SPetr Mladek  * It is called under worker->lock. The caller must make sure that
11925fa54346SPetr Mladek  * the timer used by delayed work is not running, e.g. by calling
11935fa54346SPetr Mladek  * kthread_cancel_delayed_work_timer().
119434b3d534SPetr Mladek  *
119534b3d534SPetr Mladek  * The work might still be in use when this function finishes. See the
119634b3d534SPetr Mladek  * current_work being processed by the worker.
119734b3d534SPetr Mladek  *
119834b3d534SPetr Mladek  * Return: %true if @work was pending and successfully canceled,
119934b3d534SPetr Mladek  *	%false if @work was not pending
120034b3d534SPetr Mladek  */
12015fa54346SPetr Mladek static bool __kthread_cancel_work(struct kthread_work *work)
120234b3d534SPetr Mladek {
120334b3d534SPetr Mladek 	/*
120437be45d4SPetr Mladek 	 * Try to remove the work from a worker list. It might either
120537be45d4SPetr Mladek 	 * be from worker->work_list or from worker->delayed_work_list.
120637be45d4SPetr Mladek 	 */
120737be45d4SPetr Mladek 	if (!list_empty(&work->node)) {
120837be45d4SPetr Mladek 		list_del_init(&work->node);
120937be45d4SPetr Mladek 		return true;
121037be45d4SPetr Mladek 	}
121137be45d4SPetr Mladek 
121237be45d4SPetr Mladek 	return false;
121337be45d4SPetr Mladek }
121437be45d4SPetr Mladek 
12159a6b06c8SPetr Mladek /**
12169a6b06c8SPetr Mladek  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
12179a6b06c8SPetr Mladek  * @worker: kthread worker to use
12189a6b06c8SPetr Mladek  * @dwork: kthread delayed work to queue
12199a6b06c8SPetr Mladek  * @delay: number of jiffies to wait before queuing
12209a6b06c8SPetr Mladek  *
12219a6b06c8SPetr Mladek  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
12229a6b06c8SPetr Mladek  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
12239a6b06c8SPetr Mladek  * @work is guaranteed to be queued immediately.
12249a6b06c8SPetr Mladek  *
1225d71ba164SPetr Mladek  * Return: %false if @dwork was idle and queued, %true otherwise.
12269a6b06c8SPetr Mladek  *
12279a6b06c8SPetr Mladek  * A special case is when the work is being canceled in parallel.
12289a6b06c8SPetr Mladek  * It might be caused either by the real kthread_cancel_delayed_work_sync()
12299a6b06c8SPetr Mladek  * or yet another kthread_mod_delayed_work() call. We let the other command
1230d71ba164SPetr Mladek  * win and return %true here. The return value can be used for reference
1231d71ba164SPetr Mladek  * counting and the number of queued works stays the same. Anyway, the caller
1232d71ba164SPetr Mladek  * is supposed to synchronize these operations in a reasonable way.
12339a6b06c8SPetr Mladek  *
12349a6b06c8SPetr Mladek  * This function is safe to call from any context including IRQ handler.
12359a6b06c8SPetr Mladek  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
12369a6b06c8SPetr Mladek  * for details.
12379a6b06c8SPetr Mladek  */
12389a6b06c8SPetr Mladek bool kthread_mod_delayed_work(struct kthread_worker *worker,
12399a6b06c8SPetr Mladek 			      struct kthread_delayed_work *dwork,
12409a6b06c8SPetr Mladek 			      unsigned long delay)
12419a6b06c8SPetr Mladek {
12429a6b06c8SPetr Mladek 	struct kthread_work *work = &dwork->work;
12439a6b06c8SPetr Mladek 	unsigned long flags;
1244d71ba164SPetr Mladek 	int ret;
12459a6b06c8SPetr Mladek 
1246fe99a4f4SJulia Cartwright 	raw_spin_lock_irqsave(&worker->lock, flags);
12479a6b06c8SPetr Mladek 
12489a6b06c8SPetr Mladek 	/* Do not bother with canceling when never queued. */
1249d71ba164SPetr Mladek 	if (!work->worker) {
1250d71ba164SPetr Mladek 		ret = false;
12519a6b06c8SPetr Mladek 		goto fast_queue;
1252d71ba164SPetr Mladek 	}
12539a6b06c8SPetr Mladek 
12549a6b06c8SPetr Mladek 	/* Work must not be used with >1 worker, see kthread_queue_work() */
12559a6b06c8SPetr Mladek 	WARN_ON_ONCE(work->worker != worker);
12569a6b06c8SPetr Mladek 
12575fa54346SPetr Mladek 	/*
12585fa54346SPetr Mladek 	 * Temporarily cancel the work but do not fight with another command
12595fa54346SPetr Mladek 	 * that is canceling the work as well.
12605fa54346SPetr Mladek 	 *
12615fa54346SPetr Mladek 	 * It is a bit tricky because of possible races with another
12625fa54346SPetr Mladek 	 * mod_delayed_work() and cancel_delayed_work() callers.
12635fa54346SPetr Mladek 	 *
12645fa54346SPetr Mladek 	 * The timer must be canceled first because worker->lock is released
12655fa54346SPetr Mladek 	 * when doing so. But the work can be removed from the queue (list)
12665fa54346SPetr Mladek 	 * only when it can be queued again so that the return value can
12675fa54346SPetr Mladek 	 * be used for reference counting.
12685fa54346SPetr Mladek 	 */
12695fa54346SPetr Mladek 	kthread_cancel_delayed_work_timer(work, &flags);
1270d71ba164SPetr Mladek 	if (work->canceling) {
1271d71ba164SPetr Mladek 		/* The number of works in the queue does not change. */
1272d71ba164SPetr Mladek 		ret = true;
12739a6b06c8SPetr Mladek 		goto out;
1274d71ba164SPetr Mladek 	}
12755fa54346SPetr Mladek 	ret = __kthread_cancel_work(work);
12769a6b06c8SPetr Mladek 
12779a6b06c8SPetr Mladek fast_queue:
12789a6b06c8SPetr Mladek 	__kthread_queue_delayed_work(worker, dwork, delay);
12799a6b06c8SPetr Mladek out:
1280fe99a4f4SJulia Cartwright 	raw_spin_unlock_irqrestore(&worker->lock, flags);
12819a6b06c8SPetr Mladek 	return ret;
12829a6b06c8SPetr Mladek }
12839a6b06c8SPetr Mladek EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
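
/*
 * Illustrative sketch, not part of this file: (re)arming a timeout each time
 * activity is seen, a common watchdog-style use of kthread_mod_delayed_work().
 * example_watchdog_kick() and the one-second period are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/kthread.h>

static void example_watchdog_kick(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork)
{
	/*
	 * If the work was idle it is queued afresh (returns %false); if it
	 * was already pending, only its timer is pushed back (returns %true).
	 */
	kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(1000));
}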
12849a6b06c8SPetr Mladek 
128537be45d4SPetr Mladek static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
128637be45d4SPetr Mladek {
128737be45d4SPetr Mladek 	struct kthread_worker *worker = work->worker;
128837be45d4SPetr Mladek 	unsigned long flags;
128937be45d4SPetr Mladek 	int ret = false;
129037be45d4SPetr Mladek 
129137be45d4SPetr Mladek 	if (!worker)
129237be45d4SPetr Mladek 		goto out;
129337be45d4SPetr Mladek 
1294fe99a4f4SJulia Cartwright 	raw_spin_lock_irqsave(&worker->lock, flags);
129537be45d4SPetr Mladek 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
129637be45d4SPetr Mladek 	WARN_ON_ONCE(work->worker != worker);
129737be45d4SPetr Mladek 
12985fa54346SPetr Mladek 	if (is_dwork)
12995fa54346SPetr Mladek 		kthread_cancel_delayed_work_timer(work, &flags);
13005fa54346SPetr Mladek 
13015fa54346SPetr Mladek 	ret = __kthread_cancel_work(work);
130237be45d4SPetr Mladek 
130337be45d4SPetr Mladek 	if (worker->current_work != work)
130437be45d4SPetr Mladek 		goto out_fast;
130537be45d4SPetr Mladek 
130637be45d4SPetr Mladek 	/*
130737be45d4SPetr Mladek 	 * The work is in progress and we need to wait with the lock released.
130837be45d4SPetr Mladek 	 * In the meantime, block any queuing by setting the canceling counter.
130937be45d4SPetr Mladek 	 */
131037be45d4SPetr Mladek 	work->canceling++;
1311fe99a4f4SJulia Cartwright 	raw_spin_unlock_irqrestore(&worker->lock, flags);
131237be45d4SPetr Mladek 	kthread_flush_work(work);
1313fe99a4f4SJulia Cartwright 	raw_spin_lock_irqsave(&worker->lock, flags);
131437be45d4SPetr Mladek 	work->canceling--;
131537be45d4SPetr Mladek 
131637be45d4SPetr Mladek out_fast:
1317fe99a4f4SJulia Cartwright 	raw_spin_unlock_irqrestore(&worker->lock, flags);
131837be45d4SPetr Mladek out:
131937be45d4SPetr Mladek 	return ret;
132037be45d4SPetr Mladek }
132137be45d4SPetr Mladek 
132237be45d4SPetr Mladek /**
132337be45d4SPetr Mladek  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
132437be45d4SPetr Mladek  * @work: the kthread work to cancel
132537be45d4SPetr Mladek  *
132637be45d4SPetr Mladek  * Cancel @work and wait for its execution to finish.  This function
132737be45d4SPetr Mladek  * can be used even if the work re-queues itself. On return from this
132837be45d4SPetr Mladek  * function, @work is guaranteed to be not pending or executing on any CPU.
132937be45d4SPetr Mladek  *
133037be45d4SPetr Mladek  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
133137be45d4SPetr Mladek  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
133237be45d4SPetr Mladek  *
133337be45d4SPetr Mladek  * The caller must ensure that the worker on which @work was last
133437be45d4SPetr Mladek  * queued can't be destroyed before this function returns.
133537be45d4SPetr Mladek  *
133637be45d4SPetr Mladek  * Return: %true if @work was pending, %false otherwise.
133737be45d4SPetr Mladek  */
133837be45d4SPetr Mladek bool kthread_cancel_work_sync(struct kthread_work *work)
133937be45d4SPetr Mladek {
134037be45d4SPetr Mladek 	return __kthread_cancel_work_sync(work, false);
134137be45d4SPetr Mladek }
134237be45d4SPetr Mladek EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
134337be45d4SPetr Mladek 
134437be45d4SPetr Mladek /**
134537be45d4SPetr Mladek  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
134637be45d4SPetr Mladek  *	wait for it to finish.
134737be45d4SPetr Mladek  * @dwork: the kthread delayed work to cancel
134837be45d4SPetr Mladek  *
134937be45d4SPetr Mladek  * This is kthread_cancel_work_sync() for delayed works.
135037be45d4SPetr Mladek  *
135137be45d4SPetr Mladek  * Return: %true if @dwork was pending, %false otherwise.
135237be45d4SPetr Mladek  */
135337be45d4SPetr Mladek bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
135437be45d4SPetr Mladek {
135537be45d4SPetr Mladek 	return __kthread_cancel_work_sync(&dwork->work, true);
135637be45d4SPetr Mladek }
135737be45d4SPetr Mladek EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
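
/*
 * Illustrative sketch, not part of this file: a typical shutdown path that
 * cancels both a plain and a delayed kthread work, waiting for any running
 * handler, before the worker that served them is destroyed.  The struct
 * example_dev and example_dev_shutdown() are hypothetical.
 */
#include <linux/kthread.h>

struct example_dev {
	struct kthread_worker *worker;
	struct kthread_work irq_work;
	struct kthread_delayed_work poll_work;
};

static void example_dev_shutdown(struct example_dev *dev)
{
	/* Both calls wait for a currently running handler to finish. */
	kthread_cancel_work_sync(&dev->irq_work);
	kthread_cancel_delayed_work_sync(&dev->poll_work);
	kthread_destroy_worker(dev->worker);
}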
135837be45d4SPetr Mladek 
1359b56c0d89STejun Heo /**
13603989144fSPetr Mladek  * kthread_flush_worker - flush all current works on a kthread_worker
1361b56c0d89STejun Heo  * @worker: worker to flush
1362b56c0d89STejun Heo  *
1363b56c0d89STejun Heo  * Wait until all currently executing or pending works on @worker are
1364b56c0d89STejun Heo  * finished.
1365b56c0d89STejun Heo  */
13663989144fSPetr Mladek void kthread_flush_worker(struct kthread_worker *worker)
1367b56c0d89STejun Heo {
1368b56c0d89STejun Heo 	struct kthread_flush_work fwork = {
1369b56c0d89STejun Heo 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1370b56c0d89STejun Heo 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1371b56c0d89STejun Heo 	};
1372b56c0d89STejun Heo 
13733989144fSPetr Mladek 	kthread_queue_work(worker, &fwork.work);
1374b56c0d89STejun Heo 	wait_for_completion(&fwork.done);
1375b56c0d89STejun Heo }
13763989144fSPetr Mladek EXPORT_SYMBOL_GPL(kthread_flush_worker);
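
/*
 * Illustrative sketch, not part of this file: draining everything queued so
 * far, e.g. before changing state that the work handlers read.
 * example_quiesce() is a hypothetical name.
 */
#include <linux/kthread.h>
#include <linux/printk.h>

static void example_quiesce(struct kthread_worker *worker)
{
	/* Every work queued before this call has finished when it returns. */
	kthread_flush_worker(worker);
	pr_info("worker drained\n");
}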
137735033fe9SPetr Mladek 
137835033fe9SPetr Mladek /**
137935033fe9SPetr Mladek  * kthread_destroy_worker - destroy a kthread worker
138035033fe9SPetr Mladek  * @worker: worker to be destroyed
138135033fe9SPetr Mladek  *
138235033fe9SPetr Mladek  * Flush and destroy @worker.  The simple flush is enough because the kthread
138335033fe9SPetr Mladek  * worker API is used only in trivial scenarios.  There are no multi-step state
138435033fe9SPetr Mladek  * machines needed.
1385eb79fa7eSZqiang  *
1386eb79fa7eSZqiang  * Note that this function is not responsible for handling delayed work, so
1387eb79fa7eSZqiang  * the caller is responsible for queuing or canceling all delayed work items
1388eb79fa7eSZqiang  * before invoking this function.
138935033fe9SPetr Mladek  */
139035033fe9SPetr Mladek void kthread_destroy_worker(struct kthread_worker *worker)
139135033fe9SPetr Mladek {
139235033fe9SPetr Mladek 	struct task_struct *task;
139335033fe9SPetr Mladek 
139435033fe9SPetr Mladek 	task = worker->task;
139535033fe9SPetr Mladek 	if (WARN_ON(!task))
139635033fe9SPetr Mladek 		return;
139735033fe9SPetr Mladek 
139835033fe9SPetr Mladek 	kthread_flush_worker(worker);
139935033fe9SPetr Mladek 	kthread_stop(task);
1400eb79fa7eSZqiang 	WARN_ON(!list_empty(&worker->delayed_work_list));
140135033fe9SPetr Mladek 	WARN_ON(!list_empty(&worker->work_list));
140235033fe9SPetr Mladek 	kfree(worker);
140335033fe9SPetr Mladek }
140435033fe9SPetr Mladek EXPORT_SYMBOL(kthread_destroy_worker);
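
/*
 * Illustrative sketch, not part of this file: tearing a worker down.  As the
 * comment above notes, delayed work must be dealt with by the caller first;
 * kthread_destroy_worker() only flushes and checks the queues.  The name
 * example_teardown() is hypothetical.
 */
#include <linux/kthread.h>

static void example_teardown(struct kthread_worker *worker,
			     struct kthread_delayed_work *dwork)
{
	kthread_cancel_delayed_work_sync(dwork);
	/* Flushes remaining work, stops the kthread and frees @worker. */
	kthread_destroy_worker(worker);
}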
140505e3db95SShaohua Li 
1406f5678e7fSChristoph Hellwig /**
1407f5678e7fSChristoph Hellwig  * kthread_use_mm - make the calling kthread operate on an address space
1408f5678e7fSChristoph Hellwig  * @mm: address space to operate on
14099bf5b9ebSChristoph Hellwig  */
1410f5678e7fSChristoph Hellwig void kthread_use_mm(struct mm_struct *mm)
14119bf5b9ebSChristoph Hellwig {
14129bf5b9ebSChristoph Hellwig 	struct mm_struct *active_mm;
14139bf5b9ebSChristoph Hellwig 	struct task_struct *tsk = current;
14149bf5b9ebSChristoph Hellwig 
1415f5678e7fSChristoph Hellwig 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1416f5678e7fSChristoph Hellwig 	WARN_ON_ONCE(tsk->mm);
1417f5678e7fSChristoph Hellwig 
1418aa464ba9SNicholas Piggin 	/*
1419aa464ba9SNicholas Piggin 	 * It is possible for mm to be the same as tsk->active_mm, but
1420aa464ba9SNicholas Piggin 	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1421aa464ba9SNicholas Piggin 	 * because these references are not equivalent.
1422aa464ba9SNicholas Piggin 	 */
14236cad87b0SNicholas Piggin 	mmgrab(mm);
14246cad87b0SNicholas Piggin 
14259bf5b9ebSChristoph Hellwig 	task_lock(tsk);
142638cf307cSPeter Zijlstra 	/* Hold off tlb flush IPIs while switching mm's */
142738cf307cSPeter Zijlstra 	local_irq_disable();
14289bf5b9ebSChristoph Hellwig 	active_mm = tsk->active_mm;
14299bf5b9ebSChristoph Hellwig 	tsk->active_mm = mm;
14309bf5b9ebSChristoph Hellwig 	tsk->mm = mm;
1431618758edSMathieu Desnoyers 	membarrier_update_current_mm(mm);
143238cf307cSPeter Zijlstra 	switch_mm_irqs_off(active_mm, mm, tsk);
143338cf307cSPeter Zijlstra 	local_irq_enable();
14349bf5b9ebSChristoph Hellwig 	task_unlock(tsk);
14359bf5b9ebSChristoph Hellwig #ifdef finish_arch_post_lock_switch
14369bf5b9ebSChristoph Hellwig 	finish_arch_post_lock_switch();
14379bf5b9ebSChristoph Hellwig #endif
14389bf5b9ebSChristoph Hellwig 
1439618758edSMathieu Desnoyers 	/*
1440618758edSMathieu Desnoyers 	 * When a kthread starts operating on an address space, the loop
1441618758edSMathieu Desnoyers 	 * in membarrier_{private,global}_expedited() may not observe
1442618758edSMathieu Desnoyers 	 * that tsk->mm has been updated, and not issue an IPI. Membarrier requires a
1443618758edSMathieu Desnoyers 	 * memory barrier after storing to tsk->mm, before accessing
1444618758edSMathieu Desnoyers 	 * user-space memory. A full memory barrier for membarrier
1445618758edSMathieu Desnoyers 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1446aa464ba9SNicholas Piggin 	 * mmdrop_lazy_tlb().
1447618758edSMathieu Desnoyers 	 */
1448aa464ba9SNicholas Piggin 	mmdrop_lazy_tlb(active_mm);
14499bf5b9ebSChristoph Hellwig }
1450f5678e7fSChristoph Hellwig EXPORT_SYMBOL_GPL(kthread_use_mm);
14519bf5b9ebSChristoph Hellwig 
1452f5678e7fSChristoph Hellwig /**
1453f5678e7fSChristoph Hellwig  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1454f5678e7fSChristoph Hellwig  * @mm: address space to operate on
14559bf5b9ebSChristoph Hellwig  */
1456f5678e7fSChristoph Hellwig void kthread_unuse_mm(struct mm_struct *mm)
14579bf5b9ebSChristoph Hellwig {
14589bf5b9ebSChristoph Hellwig 	struct task_struct *tsk = current;
14599bf5b9ebSChristoph Hellwig 
1460f5678e7fSChristoph Hellwig 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1461f5678e7fSChristoph Hellwig 	WARN_ON_ONCE(!tsk->mm);
1462f5678e7fSChristoph Hellwig 
14639bf5b9ebSChristoph Hellwig 	task_lock(tsk);
1464618758edSMathieu Desnoyers 	/*
1465618758edSMathieu Desnoyers 	 * When a kthread stops operating on an address space, the loop
1466618758edSMathieu Desnoyers 	 * in membarrier_{private,global}_expedited() may no longer observe
1467618758edSMathieu Desnoyers 	 * tsk->mm, and will not issue an IPI. Membarrier requires a
1468618758edSMathieu Desnoyers 	 * memory barrier after accessing user-space memory, before
1469618758edSMathieu Desnoyers 	 * clearing tsk->mm.
1470618758edSMathieu Desnoyers 	 */
1471618758edSMathieu Desnoyers 	smp_mb__after_spinlock();
14729bf5b9ebSChristoph Hellwig 	sync_mm_rss(mm);
147338cf307cSPeter Zijlstra 	local_irq_disable();
14749bf5b9ebSChristoph Hellwig 	tsk->mm = NULL;
1475618758edSMathieu Desnoyers 	membarrier_update_current_mm(NULL);
1476aa464ba9SNicholas Piggin 	mmgrab_lazy_tlb(mm);
14779bf5b9ebSChristoph Hellwig 	/* active_mm is still 'mm' */
14789bf5b9ebSChristoph Hellwig 	enter_lazy_tlb(mm, tsk);
147938cf307cSPeter Zijlstra 	local_irq_enable();
14809bf5b9ebSChristoph Hellwig 	task_unlock(tsk);
1481aa464ba9SNicholas Piggin 
1482aa464ba9SNicholas Piggin 	mmdrop(mm);
14839bf5b9ebSChristoph Hellwig }
1484f5678e7fSChristoph Hellwig EXPORT_SYMBOL_GPL(kthread_unuse_mm);
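
/*
 * Illustrative sketch, not part of this file: a kernel thread temporarily
 * adopting a user address space so that copy_to_user() can be used on behalf
 * of that process, as done by callers such as vhost.  The caller is assumed
 * to run in a kthread (PF_KTHREAD, no mm of its own) and to keep @mm alive,
 * e.g. via mmget(); example_copy_to_user_mm() is a hypothetical name.
 */
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>

static int example_copy_to_user_mm(struct mm_struct *mm, void __user *uaddr,
				   const void *buf, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);
	if (copy_to_user(uaddr, buf, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);

	return ret;
}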
14859bf5b9ebSChristoph Hellwig 
14860b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP
148705e3db95SShaohua Li /**
148805e3db95SShaohua Li  * kthread_associate_blkcg - associate blkcg to current kthread
148905e3db95SShaohua Li  * @css: the cgroup info
149005e3db95SShaohua Li  *
149105e3db95SShaohua Li  * Current thread must be a kthread. The thread runs jobs on behalf of
149205e3db95SShaohua Li  * other threads. In some cases, we expect the jobs to attach the cgroup info
149305e3db95SShaohua Li  * of the original threads instead of that of the current thread. This function
149405e3db95SShaohua Li  * stores the original thread's cgroup info in the current kthread's context
149505e3db95SShaohua Li  * for later retrieval.
149605e3db95SShaohua Li  */
149705e3db95SShaohua Li void kthread_associate_blkcg(struct cgroup_subsys_state *css)
149805e3db95SShaohua Li {
149905e3db95SShaohua Li 	struct kthread *kthread;
150005e3db95SShaohua Li 
150105e3db95SShaohua Li 	if (!(current->flags & PF_KTHREAD))
150205e3db95SShaohua Li 		return;
150305e3db95SShaohua Li 	kthread = to_kthread(current);
150405e3db95SShaohua Li 	if (!kthread)
150505e3db95SShaohua Li 		return;
150605e3db95SShaohua Li 
150705e3db95SShaohua Li 	if (kthread->blkcg_css) {
150805e3db95SShaohua Li 		css_put(kthread->blkcg_css);
150905e3db95SShaohua Li 		kthread->blkcg_css = NULL;
151005e3db95SShaohua Li 	}
151105e3db95SShaohua Li 	if (css) {
151205e3db95SShaohua Li 		css_get(css);
151305e3db95SShaohua Li 		kthread->blkcg_css = css;
151405e3db95SShaohua Li 	}
151505e3db95SShaohua Li }
151605e3db95SShaohua Li EXPORT_SYMBOL(kthread_associate_blkcg);
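
/*
 * Illustrative sketch, not part of this file (and only meaningful with
 * CONFIG_BLK_CGROUP): a helper kthread adopting the blkcg of the thread it is
 * working for, so that block I/O it submits is charged to the right cgroup.
 * example_run_on_behalf_of() and the caller-supplied css are hypothetical.
 */
#include <linux/cgroup.h>
#include <linux/kthread.h>

static void example_run_on_behalf_of(struct cgroup_subsys_state *blkcg_css)
{
	/* Take a reference on @blkcg_css and use it for subsequent I/O. */
	kthread_associate_blkcg(blkcg_css);

	/* ... submit block I/O here ... */

	/* Drop the association and the reference taken above. */
	kthread_associate_blkcg(NULL);
}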
151705e3db95SShaohua Li 
151805e3db95SShaohua Li /**
151905e3db95SShaohua Li  * kthread_blkcg - get associated blkcg css of current kthread
152005e3db95SShaohua Li  *
152105e3db95SShaohua Li  * Current thread must be a kthread.
152205e3db95SShaohua Li  */
152305e3db95SShaohua Li struct cgroup_subsys_state *kthread_blkcg(void)
152405e3db95SShaohua Li {
152505e3db95SShaohua Li 	struct kthread *kthread;
152605e3db95SShaohua Li 
152705e3db95SShaohua Li 	if (current->flags & PF_KTHREAD) {
152805e3db95SShaohua Li 		kthread = to_kthread(current);
152905e3db95SShaohua Li 		if (kthread)
153005e3db95SShaohua Li 			return kthread->blkcg_css;
153105e3db95SShaohua Li 	}
153205e3db95SShaohua Li 	return NULL;
153305e3db95SShaohua Li }
153405e3db95SShaohua Li #endif
1535