/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork) && try_get_task_stack(k))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
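
/*
 * Example (illustrative sketch, not part of the upstream file): a
 * typical threadfn that polls kthread_should_stop().  do_work() is a
 * hypothetical helper standing in for the thread's real job.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */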

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
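
/*
 * Example (illustrative sketch): a freezable loop built on
 * kthread_freezable_should_stop().  set_freezable() clears the
 * kthread's default PF_NOFREEZE; do_work() is a hypothetical helper.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */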

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
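
/*
 * Example (illustrative sketch): a threadfn that cooperates with
 * kthread_park()/kthread_unpark().  do_work() is a hypothetical helper.
 *
 *	static int my_parkable_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			do_work(data);
 *		}
 *		return 0;
 *	}
 */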

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument.  @threadfn() can either call do_exit() directly if it
 * is a standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
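
/*
 * Example (illustrative sketch): creating and starting a kthread.
 * my_thread_fn, my_data and id are hypothetical; kthread_create() is
 * the NUMA_NO_NODE shorthand for this function in <linux/kthread.h>,
 * and kthread_run() additionally wakes the new thread.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				     "my_thread/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */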

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
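
/*
 * Example (illustrative sketch): binding a freshly created thread to a
 * CPU before its first wakeup.  my_thread_fn and my_data are
 * hypothetical.
 *
 *	tsk = kthread_create(my_thread_fn, my_data, "my_thread/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */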

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread) {
		__kthread_unpark(k, kthread);
		put_task_stack(k);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);
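
/*
 * Example (illustrative sketch): a per-cpu thread is created parked,
 * then unparked to start running bound to the given cpu.  my_thread_fn
 * and my_data are hypothetical.
 *
 *	tsk = kthread_create_on_cpu(my_thread_fn, my_data, cpu,
 *				    "my_thread/%u");
 *	if (!IS_ERR(tsk))
 *		kthread_unpark(tsk);
 */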

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		put_task_stack(k);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
		put_task_stack(k);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
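
/*
 * Example (illustrative sketch): the usual lifetime pairing.
 * my_thread_fn and my_data are hypothetical; kthread_run() is the
 * create-and-wake shorthand from <linux/kthread.h>.  The value
 * returned by kthread_stop() is my_thread_fn()'s return value.
 *
 *	tsk = kthread_run(my_thread_fn, my_data, "my_thread");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */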

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
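
/*
 * Example (illustrative sketch): running a dedicated worker thread,
 * assuming the kthread_init_worker() wrapper around
 * __kthread_init_worker() from <linux/kthread.h> at this revision.
 * The worker and task_struct lifetimes are the caller's responsibility.
 *
 *	static struct kthread_worker my_worker;
 *	static struct task_struct *my_worker_task;
 *
 *	kthread_init_worker(&my_worker);
 *	my_worker_task = kthread_run(kthread_worker_fn, &my_worker,
 *				     "my_worker");
 *	if (IS_ERR(my_worker_task))
 *		return PTR_ERR(my_worker_task);
 */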

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
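
/*
 * Example (illustrative sketch): queueing a work item on the worker
 * above.  my_work_fn is a hypothetical callback; kthread_init_work()
 * is assumed to be the work initializer from <linux/kthread.h> at
 * this revision.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		... do the actual work ...
 *	}
 *
 *	static struct kthread_work my_work;
 *
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(&my_worker, &my_work);
 */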

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);
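
/*
 * Example (illustrative sketch): waiting for the work queued above to
 * finish before tearing anything down, then draining the whole worker
 * and stopping its thread.
 *
 *	kthread_flush_work(&my_work);
 *	kthread_flush_worker(&my_worker);
 *	kthread_stop(my_worker_task);
 */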

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);
717