/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

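/*
 * Illustrative sketch, not part of the original file: a minimal thread
 * function built around kthread_should_stop().  The function name and
 * the KTHREAD_DOC_EXAMPLES guard are hypothetical; the guard keeps the
 * sketch out of a real build.
 */
#ifdef KTHREAD_DOC_EXAMPLES
static int example_stop_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Do one unit of work, then yield the CPU for a while. */
		schedule_timeout_interruptible(HZ);
	}
	/* This value is handed back to the kthread_stop() caller. */
	return 0;
}
#endif
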
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

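/*
 * Illustrative sketch, not part of the original file: a thread function
 * that honours both stop and park requests, as a per-cpu kthread
 * typically must (hypothetical name, guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static int example_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleeps in TASK_PARKED until kthread_unpark(). */
			kthread_parkme();
			continue;
		}
		/* Do one unit of per-cpu work. */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif
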
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from the
 * kthread_stop() / freezer deadlock and freezable kthreads should use
 * it instead of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

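/*
 * Illustrative sketch, not part of the original file: the canonical
 * main loop of a freezable kthread (hypothetical name, guarded as
 * above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static int example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();	/* opt in; kthreads inherit PF_NOFREEZE from kthreadd */
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			continue;	/* e.g. revalidate state cached across the freeze */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif
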
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

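/*
 * Illustrative sketch, not part of the original file: create, name,
 * wake and later stop a thread, using the hypothetical example_stop_fn
 * from above (guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static int example_spawn(void)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_stop_fn, NULL, -1,
				     "example/%d", 0);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);		/* the new thread starts out sleeping */
	/* ... */
	return kthread_stop(tsk);	/* returns example_stop_fn()'s result */
}
#endif
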
static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

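/*
 * Illustrative sketch, not part of the original file: the usual
 * create-bind-wake sequence for a cpu-bound thread (guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static struct task_struct *example_spawn_bound(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_stop_fn, NULL, cpu_to_node(cpu),
				     "example_bound/%u", cpu);
	if (!IS_ERR(tsk)) {
		kthread_bind(tsk, cpu);	/* must precede the first wakeup */
		wake_up_process(tsk);
	}
	return tsk;
}
#endif
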
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu to which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

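/*
 * Illustrative sketch, not part of the original file: a per-cpu thread
 * comes back from kthread_create_on_cpu() parked; kthread_unpark()
 * binds it to its cpu and lets threadfn() run (guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static int example_spawn_percpu(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(example_percpu_fn, NULL, cpu,
				    "example_pcpu/%u");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	kthread_unpark(tsk);
	return 0;
}
#endif
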
static struct kthread *task_get_live_kthread(struct task_struct *k)
{
	get_task_struct(k);
	return to_live_kthread(k);
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it so
 * that it resumes running threadfn(). If the thread is marked percpu
 * then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
	put_task_struct(k);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	put_task_struct(k);
	return ret;
}

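/*
 * Illustrative sketch, not part of the original file: a park/unpark
 * cycle of the kind used around cpu hotplug; unlike stop/create, the
 * thread keeps its state across the cycle (guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static int example_quiesce(struct task_struct *tsk)
{
	int ret;

	ret = kthread_park(tsk);	/* thread sleeps in TASK_PARKED */
	if (ret)
		return ret;		/* -ENOSYS: the thread has exited */
	/* ... touch state the thread must not see half-updated ... */
	kthread_unpark(tsk);
	return 0;
}
#endif
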
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread = task_get_live_kthread(k);
	int ret;

	trace_sched_kthread_stop(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

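/*
 * Illustrative sketch, not part of the original file: pairing a
 * dedicated kthread with a kthread_worker via kthread_run() and the
 * init_kthread_worker() helper from <linux/kthread.h> (names guarded
 * as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static struct kthread_worker example_worker;

static int example_start_worker(void)
{
	struct task_struct *tsk;

	init_kthread_worker(&example_worker);
	tsk = kthread_run(kthread_worker_fn, &example_worker, "example_worker");
	return IS_ERR(tsk) ? PTR_ERR(tsk) : 0;
}
#endif
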
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed asynchronously by @worker, whose
 * kthread must be running kthread_worker_fn().  Returns %true if
 * @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

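/*
 * Illustrative sketch, not part of the original file: declaring a work
 * item with DEFINE_KTHREAD_WORK() from <linux/kthread.h> and queueing
 * it on the hypothetical example_worker above (guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static void example_work_fn(struct kthread_work *work)
{
	/* Runs in the context of example_worker's kthread. */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static bool example_queue(void)
{
	/* %false means the work was already pending; it still runs once. */
	return queue_kthread_work(&example_worker, &example_work);
}
#endif
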
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);

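/*
 * Illustrative sketch, not part of the original file: draining and
 * tearing down the worker from the sketches above (guarded as above).
 */
#ifdef KTHREAD_DOC_EXAMPLES
static void example_stop_worker(struct task_struct *tsk)
{
	flush_kthread_worker(&example_worker);	/* wait for queued works */
	kthread_stop(tsk);			/* then stop the kthread */
}
#endif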