1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /* Kernel thread helper functions.
31da177e4SLinus Torvalds * Copyright (C) 2004 IBM Corporation, Rusty Russell.
49bf5b9ebSChristoph Hellwig * Copyright (C) 2009 Red Hat, Inc.
51da177e4SLinus Torvalds *
673c27992SEric W. Biederman * Creation is done via kthreadd, so that we get a clean environment
71da177e4SLinus Torvalds * even if we're invoked from userspace (think modprobe, hotplug cpu,
81da177e4SLinus Torvalds * etc.).
91da177e4SLinus Torvalds */
10ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
119bf5b9ebSChristoph Hellwig #include <linux/mm.h>
129bf5b9ebSChristoph Hellwig #include <linux/mmu_context.h>
131da177e4SLinus Torvalds #include <linux/sched.h>
149bf5b9ebSChristoph Hellwig #include <linux/sched/mm.h>
1529930025SIngo Molnar #include <linux/sched/task.h>
161da177e4SLinus Torvalds #include <linux/kthread.h>
171da177e4SLinus Torvalds #include <linux/completion.h>
181da177e4SLinus Torvalds #include <linux/err.h>
198af0c18aSSuren Baghdasaryan #include <linux/cgroup.h>
2058568d2aSMiao Xie #include <linux/cpuset.h>
211da177e4SLinus Torvalds #include <linux/unistd.h>
221da177e4SLinus Torvalds #include <linux/file.h>
239984de1aSPaul Gortmaker #include <linux/export.h>
2497d1f15bSArjan van de Ven #include <linux/mutex.h>
25b56c0d89STejun Heo #include <linux/slab.h>
26b56c0d89STejun Heo #include <linux/freezer.h>
27a74fb73cSAl Viro #include <linux/ptrace.h>
28cd42d559STejun Heo #include <linux/uaccess.h>
2998fa15f3SAnshuman Khandual #include <linux/numa.h>
309cc5b865SMarcelo Tosatti #include <linux/sched/isolation.h>
31ad8d75ffSSteven Rostedt #include <trace/events/sched.h>
321da177e4SLinus Torvalds
339bf5b9ebSChristoph Hellwig
3473c27992SEric W. Biederman static DEFINE_SPINLOCK(kthread_create_lock);
3573c27992SEric W. Biederman static LIST_HEAD(kthread_create_list);
3673c27992SEric W. Biederman struct task_struct *kthreadd_task;
371da177e4SLinus Torvalds
381da177e4SLinus Torvalds struct kthread_create_info
391da177e4SLinus Torvalds {
4073c27992SEric W. Biederman /* Information passed to kthread() from kthreadd. */
4173e0c116SMike Christie char *full_name;
421da177e4SLinus Torvalds int (*threadfn)(void *data);
431da177e4SLinus Torvalds void *data;
44207205a2SEric Dumazet int node;
451da177e4SLinus Torvalds
4673c27992SEric W. Biederman /* Result passed back to kthread_create() from kthreadd. */
471da177e4SLinus Torvalds struct task_struct *result;
48786235eeSTetsuo Handa struct completion *done;
4965f27f38SDavid Howells
5073c27992SEric W. Biederman struct list_head list;
511da177e4SLinus Torvalds };
521da177e4SLinus Torvalds
5363706172SOleg Nesterov struct kthread {
542a1d4460SThomas Gleixner unsigned long flags;
552a1d4460SThomas Gleixner unsigned int cpu;
566b124879SEric W. Biederman int result;
5752782c92SJ. Bruce Fields int (*threadfn)(void *);
5882805ab7STejun Heo void *data;
592a1d4460SThomas Gleixner struct completion parked;
6063706172SOleg Nesterov struct completion exited;
610b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP
6205e3db95SShaohua Li struct cgroup_subsys_state *blkcg_css;
6305e3db95SShaohua Li #endif
64d6986ce2SYafang Shao /* To store the full name if task comm is truncated. */
65d6986ce2SYafang Shao char *full_name;
661da177e4SLinus Torvalds };
671da177e4SLinus Torvalds
682a1d4460SThomas Gleixner enum KTHREAD_BITS {
692a1d4460SThomas Gleixner KTHREAD_IS_PER_CPU = 0,
702a1d4460SThomas Gleixner KTHREAD_SHOULD_STOP,
712a1d4460SThomas Gleixner KTHREAD_SHOULD_PARK,
722a1d4460SThomas Gleixner };
732a1d4460SThomas Gleixner
744ecdafc8SOleg Nesterov static inline struct kthread *to_kthread(struct task_struct *k)
754ecdafc8SOleg Nesterov {
761da5c46fSOleg Nesterov WARN_ON(!(k->flags & PF_KTHREAD));
77e32cf5dfSEric W. Biederman return k->worker_private;
784ecdafc8SOleg Nesterov }
794ecdafc8SOleg Nesterov
803a7956e2SPeter Zijlstra /*
813a7956e2SPeter Zijlstra * Variant of to_kthread() that doesn't assume @p is a kthread.
823a7956e2SPeter Zijlstra *
833a7956e2SPeter Zijlstra * Per construction; when:
843a7956e2SPeter Zijlstra *
85e32cf5dfSEric W. Biederman * (p->flags & PF_KTHREAD) && p->worker_private
863a7956e2SPeter Zijlstra *
873a7956e2SPeter Zijlstra * the task is both a kthread and struct kthread is persistent. However
883a7956e2SPeter Zijlstra * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
893a7956e2SPeter Zijlstra * begin_new_exec()).
903a7956e2SPeter Zijlstra */
913a7956e2SPeter Zijlstra static inline struct kthread *__to_kthread(struct task_struct *p)
923a7956e2SPeter Zijlstra {
93e32cf5dfSEric W. Biederman void *kthread = p->worker_private;
943a7956e2SPeter Zijlstra if (kthread && !(p->flags & PF_KTHREAD))
953a7956e2SPeter Zijlstra kthread = NULL;
963a7956e2SPeter Zijlstra return kthread;
973a7956e2SPeter Zijlstra }
983a7956e2SPeter Zijlstra
99d6986ce2SYafang Shao void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
100d6986ce2SYafang Shao {
101d6986ce2SYafang Shao struct kthread *kthread = to_kthread(tsk);
102d6986ce2SYafang Shao
103d6986ce2SYafang Shao if (!kthread || !kthread->full_name) {
104d6986ce2SYafang Shao __get_task_comm(buf, buf_size, tsk);
105d6986ce2SYafang Shao return;
106d6986ce2SYafang Shao }
107d6986ce2SYafang Shao
108d6986ce2SYafang Shao strscpy_pad(buf, kthread->full_name, buf_size);
109d6986ce2SYafang Shao }
110d6986ce2SYafang Shao
11140966e31SEric W. Biederman bool set_kthread_struct(struct task_struct *p)
11200b89fe0SValentin Schneider {
11300b89fe0SValentin Schneider struct kthread *kthread;
11400b89fe0SValentin Schneider
11540966e31SEric W. Biederman if (WARN_ON_ONCE(to_kthread(p)))
11640966e31SEric W. Biederman return false;
11700b89fe0SValentin Schneider
11800b89fe0SValentin Schneider kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
11940966e31SEric W. Biederman if (!kthread)
12040966e31SEric W. Biederman return false;
12140966e31SEric W. Biederman
12240966e31SEric W. Biederman init_completion(&kthread->exited);
12340966e31SEric W. Biederman init_completion(&kthread->parked);
12440966e31SEric W. Biederman p->vfork_done = &kthread->exited;
12540966e31SEric W. Biederman
126e32cf5dfSEric W. Biederman p->worker_private = kthread;
12740966e31SEric W. Biederman return true;
12800b89fe0SValentin Schneider }
12900b89fe0SValentin Schneider
1301da5c46fSOleg Nesterov void free_kthread_struct(struct task_struct *k)
1311da5c46fSOleg Nesterov {
13205e3db95SShaohua Li struct kthread *kthread;
13305e3db95SShaohua Li
1341da5c46fSOleg Nesterov /*
13540966e31SEric W. Biederman * Can be NULL if kmalloc() in set_kthread_struct() failed.
1361da5c46fSOleg Nesterov */
13705e3db95SShaohua Li kthread = to_kthread(k);
138d6986ce2SYafang Shao if (!kthread)
139d6986ce2SYafang Shao return;
140d6986ce2SYafang Shao
1410b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP
142d6986ce2SYafang Shao WARN_ON_ONCE(kthread->blkcg_css);
14305e3db95SShaohua Li #endif
144e32cf5dfSEric W. Biederman k->worker_private = NULL;
145d6986ce2SYafang Shao kfree(kthread->full_name);
14605e3db95SShaohua Li kfree(kthread);
1471da5c46fSOleg Nesterov }
1481da5c46fSOleg Nesterov
1499e37bd30SRandy Dunlap /**
1509e37bd30SRandy Dunlap * kthread_should_stop - should this kthread return now?
1519e37bd30SRandy Dunlap *
15272fd4a35SRobert P. J. Day * When someone calls kthread_stop() on your kthread, it will be woken
1539e37bd30SRandy Dunlap * and this will return true. You should then return, and your return
1549e37bd30SRandy Dunlap * value will be passed through to kthread_stop().
1559e37bd30SRandy Dunlap */
1562a1d4460SThomas Gleixner bool kthread_should_stop(void)
1571da177e4SLinus Torvalds {
1582a1d4460SThomas Gleixner return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
1591da177e4SLinus Torvalds }
1601da177e4SLinus Torvalds EXPORT_SYMBOL(kthread_should_stop);
1611da177e4SLinus Torvalds
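/*
 * Illustrative sketch, not part of the upstream file: a minimal thread
 * function that polls until kthread_should_stop() becomes true.  The
 * names my_dev and my_poll() are hypothetical.
 *
 *    static int my_thread_fn(void *arg)
 *    {
 *        struct my_dev *dev = arg;
 *
 *        while (!kthread_should_stop()) {
 *            my_poll(dev);
 *            schedule_timeout_interruptible(HZ);
 *        }
 *        return 0;
 *    }
 *
 * The creator would typically pair this with kthread_run() to start the
 * thread and kthread_stop() to retrieve the return value.
 */
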
162be33db21SGreg Kroah-Hartman static bool __kthread_should_park(struct task_struct *k)
1630121805dSMatthias Kaehlcke {
1640121805dSMatthias Kaehlcke return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
1650121805dSMatthias Kaehlcke }
1660121805dSMatthias Kaehlcke
16782805ab7STejun Heo /**
1682a1d4460SThomas Gleixner * kthread_should_park - should this kthread park now?
1692a1d4460SThomas Gleixner *
1702a1d4460SThomas Gleixner * When someone calls kthread_park() on your kthread, it will be woken
1712a1d4460SThomas Gleixner * and this will return true. You should then do the necessary
1722a1d4460SThomas Gleixner * cleanup and call kthread_parkme()
1732a1d4460SThomas Gleixner *
1742a1d4460SThomas Gleixner * Similar to kthread_should_stop(), but this keeps the thread alive
1752a1d4460SThomas Gleixner * and in a park position. kthread_unpark() "restarts" the thread and
1762a1d4460SThomas Gleixner * calls the thread function again.
1772a1d4460SThomas Gleixner */
1782a1d4460SThomas Gleixner bool kthread_should_park(void)
1792a1d4460SThomas Gleixner {
1800121805dSMatthias Kaehlcke return __kthread_should_park(current);
1812a1d4460SThomas Gleixner }
18218896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_should_park);
1832a1d4460SThomas Gleixner
184ef73d6a4SArve Hjønnevåg bool kthread_should_stop_or_park(void)
185ef73d6a4SArve Hjønnevåg {
186ef73d6a4SArve Hjønnevåg struct kthread *kthread = __to_kthread(current);
187ef73d6a4SArve Hjønnevåg
188ef73d6a4SArve Hjønnevåg if (!kthread)
189ef73d6a4SArve Hjønnevåg return false;
190ef73d6a4SArve Hjønnevåg
191ef73d6a4SArve Hjønnevåg return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
192ef73d6a4SArve Hjønnevåg }
193ef73d6a4SArve Hjønnevåg
1942a1d4460SThomas Gleixner /**
1958a32c441STejun Heo * kthread_freezable_should_stop - should this freezable kthread return now?
1968a32c441STejun Heo * @was_frozen: optional out parameter, indicates whether %current was frozen
1978a32c441STejun Heo *
1988a32c441STejun Heo * kthread_should_stop() for freezable kthreads, which will enter
1998a32c441STejun Heo * refrigerator if necessary. This function is safe from kthread_stop() /
2008a32c441STejun Heo * freezer deadlock and freezable kthreads should use this function instead
2018a32c441STejun Heo * of calling try_to_freeze() directly.
2028a32c441STejun Heo */
2038a32c441STejun Heo bool kthread_freezable_should_stop(bool *was_frozen)
2048a32c441STejun Heo {
2058a32c441STejun Heo bool frozen = false;
2068a32c441STejun Heo
2078a32c441STejun Heo might_sleep();
2088a32c441STejun Heo
2098a32c441STejun Heo if (unlikely(freezing(current)))
2108a32c441STejun Heo frozen = __refrigerator(true);
2118a32c441STejun Heo
2128a32c441STejun Heo if (was_frozen)
2138a32c441STejun Heo *was_frozen = frozen;
2148a32c441STejun Heo
2158a32c441STejun Heo return kthread_should_stop();
2168a32c441STejun Heo }
2178a32c441STejun Heo EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
2188a32c441STejun Heo
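/*
 * Illustrative sketch, not part of the upstream file: a freezable kthread
 * loop.  set_freezable() clears PF_NOFREEZE, and
 * kthread_freezable_should_stop() enters the refrigerator when a freeze is
 * requested.  my_flush() is a hypothetical helper.
 *
 *    static int my_freezable_fn(void *arg)
 *    {
 *        bool was_frozen;
 *
 *        set_freezable();
 *        while (!kthread_freezable_should_stop(&was_frozen)) {
 *            if (was_frozen)
 *                pr_debug("resumed after freeze\n");
 *            my_flush(arg);
 *            schedule_timeout_interruptible(HZ);
 *        }
 *        return 0;
 *    }
 */
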
2198a32c441STejun Heo /**
22052782c92SJ. Bruce Fields * kthread_func - return the function specified on kthread creation
22152782c92SJ. Bruce Fields * @task: kthread task in question
22252782c92SJ. Bruce Fields *
22352782c92SJ. Bruce Fields * Returns NULL if the task is not a kthread.
22452782c92SJ. Bruce Fields */
22552782c92SJ. Bruce Fields void *kthread_func(struct task_struct *task)
22652782c92SJ. Bruce Fields {
2273a7956e2SPeter Zijlstra struct kthread *kthread = __to_kthread(task);
2283a7956e2SPeter Zijlstra if (kthread)
2293a7956e2SPeter Zijlstra return kthread->threadfn;
23052782c92SJ. Bruce Fields return NULL;
23152782c92SJ. Bruce Fields }
23252782c92SJ. Bruce Fields EXPORT_SYMBOL_GPL(kthread_func);
23352782c92SJ. Bruce Fields
23452782c92SJ. Bruce Fields /**
23582805ab7STejun Heo * kthread_data - return data value specified on kthread creation
23682805ab7STejun Heo * @task: kthread task in question
23782805ab7STejun Heo *
23882805ab7STejun Heo * Return the data value specified when kthread @task was created.
23982805ab7STejun Heo * The caller is responsible for ensuring the validity of @task when
24082805ab7STejun Heo * calling this function.
24182805ab7STejun Heo */
24282805ab7STejun Heo void *kthread_data(struct task_struct *task)
24382805ab7STejun Heo {
24482805ab7STejun Heo return to_kthread(task)->data;
24582805ab7STejun Heo }
24652782c92SJ. Bruce Fields EXPORT_SYMBOL_GPL(kthread_data);
24782805ab7STejun Heo
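/*
 * Illustrative sketch, not part of the upstream file: the @data pointer
 * passed at creation time is what kthread_data() returns later, which is
 * how callers retrieve their per-thread state.  The struct and names are
 * hypothetical.
 *
 *    struct my_ctx { int id; };
 *
 *    tsk = kthread_run(my_thread_fn, ctx, "my/%d", ctx->id);
 *
 * and inside code running in that thread:
 *
 *    struct my_ctx *ctx = kthread_data(current);
 */
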
248cd42d559STejun Heo /**
249e700591aSPetr Mladek * kthread_probe_data - speculative version of kthread_data()
250cd42d559STejun Heo * @task: possible kthread task in question
251cd42d559STejun Heo *
252cd42d559STejun Heo * @task could be a kthread task. Return the data value specified when it
253cd42d559STejun Heo * was created if accessible. If @task isn't a kthread task or its data is
254cd42d559STejun Heo * inaccessible for any reason, %NULL is returned. This function requires
255cd42d559STejun Heo * that @task itself is safe to dereference.
256cd42d559STejun Heo */
257e700591aSPetr Mladek void *kthread_probe_data(struct task_struct *task)
258cd42d559STejun Heo {
2593a7956e2SPeter Zijlstra struct kthread *kthread = __to_kthread(task);
260cd42d559STejun Heo void *data = NULL;
261cd42d559STejun Heo
2623a7956e2SPeter Zijlstra if (kthread)
263fe557319SChristoph Hellwig copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
264cd42d559STejun Heo return data;
265cd42d559STejun Heo }
266cd42d559STejun Heo
2672a1d4460SThomas Gleixner static void __kthread_parkme(struct kthread *self)
2682a1d4460SThomas Gleixner {
269741a76b3SPeter Zijlstra for (;;) {
2701cef1150SPeter Zijlstra /*
2711cef1150SPeter Zijlstra * TASK_PARKED is a special state; we must serialize against
2721cef1150SPeter Zijlstra * possible pending wakeups to avoid store-store collisions on
2731cef1150SPeter Zijlstra * task->state.
2741cef1150SPeter Zijlstra *
2751cef1150SPeter Zijlstra * Such a collision might possibly result in the task state
2761cef1150SPeter Zijlstra * changing from TASK_PARKED and us failing the
2771cef1150SPeter Zijlstra * wait_task_inactive() in kthread_park().
2781cef1150SPeter Zijlstra */
2791cef1150SPeter Zijlstra set_special_state(TASK_PARKED);
280741a76b3SPeter Zijlstra if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
281741a76b3SPeter Zijlstra break;
2821cef1150SPeter Zijlstra
28326c7295bSLiang Chen /*
28426c7295bSLiang Chen * Thread is going to call schedule(), do not preempt it,
28526c7295bSLiang Chen * or the caller of kthread_park() may spend more time in
28626c7295bSLiang Chen * wait_task_inactive().
28726c7295bSLiang Chen */
28826c7295bSLiang Chen preempt_disable();
289f83ee19bSPeter Zijlstra complete(&self->parked);
29026c7295bSLiang Chen schedule_preempt_disabled();
29126c7295bSLiang Chen preempt_enable();
2922a1d4460SThomas Gleixner }
2932a1d4460SThomas Gleixner __set_current_state(TASK_RUNNING);
2942a1d4460SThomas Gleixner }
2952a1d4460SThomas Gleixner
2962a1d4460SThomas Gleixner void kthread_parkme(void)
2972a1d4460SThomas Gleixner {
2982a1d4460SThomas Gleixner __kthread_parkme(to_kthread(current));
2992a1d4460SThomas Gleixner }
30018896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_parkme);
3012a1d4460SThomas Gleixner
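/*
 * Illustrative sketch, not part of the upstream file: a thread that
 * cooperates with kthread_park()/kthread_unpark() by parking itself at a
 * safe point, so the caller of kthread_park() knows no work is in flight.
 * my_step() is a hypothetical helper that may sleep.
 *
 *    static int my_parkable_fn(void *arg)
 *    {
 *        while (!kthread_should_stop()) {
 *            if (kthread_should_park())
 *                kthread_parkme();
 *            else
 *                my_step(arg);
 *        }
 *        return 0;
 *    }
 */
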
302bbda86e9SEric W. Biederman /**
303bbda86e9SEric W. Biederman * kthread_exit - Cause the current kthread return @result to kthread_stop().
304bbda86e9SEric W. Biederman * @result: The integer value to return to kthread_stop().
305bbda86e9SEric W. Biederman *
306bbda86e9SEric W. Biederman * While kthread_exit can be called directly, it exists so that
307bbda86e9SEric W. Biederman * functions which do some additional work in non-modular code such as
308bbda86e9SEric W. Biederman * module_put_and_kthread_exit can be implemented.
309bbda86e9SEric W. Biederman *
310bbda86e9SEric W. Biederman * Does not return.
311bbda86e9SEric W. Biederman */
312bbda86e9SEric W. Biederman void __noreturn kthread_exit(long result)
313bbda86e9SEric W. Biederman {
3146b124879SEric W. Biederman struct kthread *kthread = to_kthread(current);
3156b124879SEric W. Biederman kthread->result = result;
3166b124879SEric W. Biederman do_exit(0);
317bbda86e9SEric W. Biederman }
318bbda86e9SEric W. Biederman
319cead1855SEric W. Biederman /**
3205eb6f228SEric W. Biederman * kthread_complete_and_exit - Exit the current kthread.
321cead1855SEric W. Biederman * @comp: Completion to complete
322cead1855SEric W. Biederman * @code: The integer value to return to kthread_stop().
323cead1855SEric W. Biederman *
3246a25212dSPrathu Baronia * If present, complete @comp and then return @code to kthread_stop().
325cead1855SEric W. Biederman *
326cead1855SEric W. Biederman * A kernel thread whose module may be removed after the completion of
3276a25212dSPrathu Baronia * @comp can use this function to exit safely.
328cead1855SEric W. Biederman *
329cead1855SEric W. Biederman * Does not return.
330cead1855SEric W. Biederman */
331cead1855SEric W. Biederman void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
332cead1855SEric W. Biederman {
333cead1855SEric W. Biederman if (comp)
334cead1855SEric W. Biederman complete(comp);
335cead1855SEric W. Biederman
336cead1855SEric W. Biederman kthread_exit(code);
337cead1855SEric W. Biederman }
338cead1855SEric W. Biederman EXPORT_SYMBOL(kthread_complete_and_exit);
339cead1855SEric W. Biederman
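/*
 * Illustrative sketch, not part of the upstream file: a thread living in a
 * module that must not execute module code after signalling that it is
 * done.  struct my_ctx with an embedded completion is hypothetical.
 *
 *    static int my_exit_fn(void *arg)
 *    {
 *        struct my_ctx *ctx = arg;
 *
 *        do_the_work(ctx);
 *        kthread_complete_and_exit(&ctx->done, 0);
 *    }
 *
 * Because kthread_complete_and_exit() never returns, nothing from the
 * module runs after &ctx->done is completed, so the unload path waiting on
 * that completion may safely proceed.
 */
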
3401da177e4SLinus Torvalds static int kthread(void *_create)
3411da177e4SLinus Torvalds {
3421a7243caSSebastian Andrzej Siewior static const struct sched_param param = { .sched_priority = 0 };
34373c27992SEric W. Biederman /* Copy data: it's on kthread's stack */
34463706172SOleg Nesterov struct kthread_create_info *create = _create;
34563706172SOleg Nesterov int (*threadfn)(void *data) = create->threadfn;
34663706172SOleg Nesterov void *data = create->data;
347786235eeSTetsuo Handa struct completion *done;
3481da5c46fSOleg Nesterov struct kthread *self;
34963706172SOleg Nesterov int ret;
35063706172SOleg Nesterov
35100b89fe0SValentin Schneider self = to_kthread(current);
3521da177e4SLinus Torvalds
353d25c83c6SPetr Mladek /* Release the structure when caller killed by a fatal signal. */
354786235eeSTetsuo Handa done = xchg(&create->done, NULL);
355786235eeSTetsuo Handa if (!done) {
35673e0c116SMike Christie kfree(create->full_name);
357786235eeSTetsuo Handa kfree(create);
358bbda86e9SEric W. Biederman kthread_exit(-EINTR);
3591da5c46fSOleg Nesterov }
3601da5c46fSOleg Nesterov
36173e0c116SMike Christie self->full_name = create->full_name;
36252782c92SJ. Bruce Fields self->threadfn = threadfn;
3631da5c46fSOleg Nesterov self->data = data;
3641da5c46fSOleg Nesterov
3651a7243caSSebastian Andrzej Siewior /*
3661a7243caSSebastian Andrzej Siewior * The new thread inherited kthreadd's priority and CPU mask. Reset
3671a7243caSSebastian Andrzej Siewior * back to default in case they have been changed.
3681a7243caSSebastian Andrzej Siewior */
3691a7243caSSebastian Andrzej Siewior sched_setscheduler_nocheck(current, SCHED_NORMAL, ¶m);
37004d4e665SFrederic Weisbecker set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
3711a7243caSSebastian Andrzej Siewior
3721da177e4SLinus Torvalds /* OK, tell user we're spawned, wait for stop or wakeup */
373a076e4bcSOleg Nesterov __set_current_state(TASK_UNINTERRUPTIBLE);
3743217ab97SVitaliy Gusev create->result = current;
37526c7295bSLiang Chen /*
37626c7295bSLiang Chen * Thread is going to call schedule(), do not preempt it,
37726c7295bSLiang Chen * or the creator may spend more time in wait_task_inactive().
37826c7295bSLiang Chen */
37926c7295bSLiang Chen preempt_disable();
380786235eeSTetsuo Handa complete(done);
38126c7295bSLiang Chen schedule_preempt_disabled();
38226c7295bSLiang Chen preempt_enable();
3831da177e4SLinus Torvalds
38463706172SOleg Nesterov ret = -EINTR;
3851da5c46fSOleg Nesterov if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
38677f88796STejun Heo cgroup_kthread_ready();
3871da5c46fSOleg Nesterov __kthread_parkme(self);
3882a1d4460SThomas Gleixner ret = threadfn(data);
3892a1d4460SThomas Gleixner }
390bbda86e9SEric W. Biederman kthread_exit(ret);
3911da177e4SLinus Torvalds }
3921da177e4SLinus Torvalds
393cb5021caSYanfei Xu /* called from kernel_clone() to get node information for the task about to be created */
394207205a2SEric Dumazet int tsk_fork_get_node(struct task_struct *tsk)
395207205a2SEric Dumazet {
396207205a2SEric Dumazet #ifdef CONFIG_NUMA
397207205a2SEric Dumazet if (tsk == kthreadd_task)
398207205a2SEric Dumazet return tsk->pref_node_fork;
399207205a2SEric Dumazet #endif
40081c98869SNishanth Aravamudan return NUMA_NO_NODE;
401207205a2SEric Dumazet }
402207205a2SEric Dumazet
40373c27992SEric W. Biederman static void create_kthread(struct kthread_create_info *create)
4041da177e4SLinus Torvalds {
4051da177e4SLinus Torvalds int pid;
4061da177e4SLinus Torvalds
407207205a2SEric Dumazet #ifdef CONFIG_NUMA
408207205a2SEric Dumazet current->pref_node_fork = create->node;
409207205a2SEric Dumazet #endif
4101da177e4SLinus Torvalds /* We want our own signal handler (we take no signals by default). */
41173e0c116SMike Christie pid = kernel_thread(kthread, create, create->full_name,
412cf587db2SMike Christie CLONE_FS | CLONE_FILES | SIGCHLD);
413cdd140bdSOleg Nesterov if (pid < 0) {
414d25c83c6SPetr Mladek /* Release the structure when caller killed by a fatal signal. */
415786235eeSTetsuo Handa struct completion *done = xchg(&create->done, NULL);
416786235eeSTetsuo Handa
41773e0c116SMike Christie kfree(create->full_name);
418786235eeSTetsuo Handa if (!done) {
419786235eeSTetsuo Handa kfree(create);
420786235eeSTetsuo Handa return;
421786235eeSTetsuo Handa }
4221da177e4SLinus Torvalds create->result = ERR_PTR(pid);
423786235eeSTetsuo Handa complete(done);
4241da177e4SLinus Torvalds }
425cdd140bdSOleg Nesterov }
4261da177e4SLinus Torvalds
427c0b942a7SNicolas Iooss static __printf(4, 0)
428c0b942a7SNicolas Iooss struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
4292a1d4460SThomas Gleixner void *data, int node,
4301da177e4SLinus Torvalds const char namefmt[],
431255451e4SPetr Mladek va_list args)
4321da177e4SLinus Torvalds {
433786235eeSTetsuo Handa DECLARE_COMPLETION_ONSTACK(done);
434786235eeSTetsuo Handa struct task_struct *task;
435786235eeSTetsuo Handa struct kthread_create_info *create = kmalloc(sizeof(*create),
436786235eeSTetsuo Handa GFP_KERNEL);
4371da177e4SLinus Torvalds
438786235eeSTetsuo Handa if (!create)
439786235eeSTetsuo Handa return ERR_PTR(-ENOMEM);
440786235eeSTetsuo Handa create->threadfn = threadfn;
441786235eeSTetsuo Handa create->data = data;
442786235eeSTetsuo Handa create->node = node;
443786235eeSTetsuo Handa create->done = &done;
44473e0c116SMike Christie create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
44573e0c116SMike Christie if (!create->full_name) {
44673e0c116SMike Christie task = ERR_PTR(-ENOMEM);
44773e0c116SMike Christie goto free_create;
44873e0c116SMike Christie }
4491da177e4SLinus Torvalds
45073c27992SEric W. Biederman spin_lock(&kthread_create_lock);
451786235eeSTetsuo Handa list_add_tail(&create->list, &kthread_create_list);
45273c27992SEric W. Biederman spin_unlock(&kthread_create_lock);
45373c27992SEric W. Biederman
454cbd9b67bSDmitry Adamushko wake_up_process(kthreadd_task);
455786235eeSTetsuo Handa /*
456786235eeSTetsuo Handa * Wait for completion in killable state, for I might be chosen by
457786235eeSTetsuo Handa * the OOM killer while kthreadd is trying to allocate memory for
458786235eeSTetsuo Handa * new kernel thread.
459786235eeSTetsuo Handa */
460786235eeSTetsuo Handa if (unlikely(wait_for_completion_killable(&done))) {
461786235eeSTetsuo Handa /*
462d25c83c6SPetr Mladek * If I was killed by a fatal signal before kthreadd (or new
463d25c83c6SPetr Mladek * kernel thread) calls complete(), leave the cleanup of this
464d25c83c6SPetr Mladek * structure to that thread.
465786235eeSTetsuo Handa */
466786235eeSTetsuo Handa if (xchg(&create->done, NULL))
4678fe6929cSTetsuo Handa return ERR_PTR(-EINTR);
468786235eeSTetsuo Handa /*
469786235eeSTetsuo Handa * kthreadd (or new kernel thread) will call complete()
470786235eeSTetsuo Handa * shortly.
471786235eeSTetsuo Handa */
472786235eeSTetsuo Handa wait_for_completion(&done);
473786235eeSTetsuo Handa }
474786235eeSTetsuo Handa task = create->result;
47573e0c116SMike Christie free_create:
476786235eeSTetsuo Handa kfree(create);
477786235eeSTetsuo Handa return task;
4781da177e4SLinus Torvalds }
479255451e4SPetr Mladek
480255451e4SPetr Mladek /**
481255451e4SPetr Mladek * kthread_create_on_node - create a kthread.
482255451e4SPetr Mladek * @threadfn: the function to run until signal_pending(current).
483255451e4SPetr Mladek * @data: data ptr for @threadfn.
484255451e4SPetr Mladek * @node: task and thread structures for the thread are allocated on this node
485255451e4SPetr Mladek * @namefmt: printf-style name for the thread.
486255451e4SPetr Mladek *
487255451e4SPetr Mladek * Description: This helper function creates and names a kernel
488255451e4SPetr Mladek * thread. The thread will be stopped: use wake_up_process() to start
489255451e4SPetr Mladek * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
490255451e4SPetr Mladek * is affine to all CPUs.
491255451e4SPetr Mladek *
492255451e4SPetr Mladek * If thread is going to be bound on a particular cpu, give its node
493255451e4SPetr Mladek * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
494255451e4SPetr Mladek * When woken, the thread will run @threadfn() with @data as its
495111e7049SEric W. Biederman * argument. @threadfn() can either return directly if it is a
496255451e4SPetr Mladek * standalone thread for which no one will call kthread_stop(), or
497255451e4SPetr Mladek * return when 'kthread_should_stop()' is true (which means
498255451e4SPetr Mladek * kthread_stop() has been called). The return value should be zero
499255451e4SPetr Mladek * or a negative error number; it will be passed to kthread_stop().
500255451e4SPetr Mladek *
501255451e4SPetr Mladek * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
502255451e4SPetr Mladek */
503255451e4SPetr Mladek struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
504255451e4SPetr Mladek void *data, int node,
505255451e4SPetr Mladek const char namefmt[],
506255451e4SPetr Mladek ...)
507255451e4SPetr Mladek {
508255451e4SPetr Mladek struct task_struct *task;
509255451e4SPetr Mladek va_list args;
510255451e4SPetr Mladek
511255451e4SPetr Mladek va_start(args, namefmt);
512255451e4SPetr Mladek task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
513255451e4SPetr Mladek va_end(args);
514255451e4SPetr Mladek
515255451e4SPetr Mladek return task;
516255451e4SPetr Mladek }
517207205a2SEric Dumazet EXPORT_SYMBOL(kthread_create_on_node);
5181da177e4SLinus Torvalds
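/*
 * Illustrative sketch, not part of the upstream file: create a thread whose
 * task_struct and stack are allocated near a device's NUMA node, then start
 * it explicitly.  my_irq_fn, dev and pdev (a platform device) are assumed
 * for the example.
 *
 *    struct task_struct *tsk;
 *
 *    tsk = kthread_create_on_node(my_irq_fn, dev, dev_to_node(&pdev->dev),
 *                                 "myirq/%s", dev_name(&pdev->dev));
 *    if (IS_ERR(tsk))
 *        return PTR_ERR(tsk);
 *    wake_up_process(tsk);
 */
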
5192f064a59SPeter Zijlstra static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
5202a1d4460SThomas Gleixner {
52125834c73SPeter Zijlstra unsigned long flags;
52225834c73SPeter Zijlstra
523f2530dc7SThomas Gleixner if (!wait_task_inactive(p, state)) {
524f2530dc7SThomas Gleixner WARN_ON(1);
525f2530dc7SThomas Gleixner return;
526f2530dc7SThomas Gleixner }
52725834c73SPeter Zijlstra
5282a1d4460SThomas Gleixner /* It's safe because the task is inactive. */
52925834c73SPeter Zijlstra raw_spin_lock_irqsave(&p->pi_lock, flags);
53025834c73SPeter Zijlstra do_set_cpus_allowed(p, mask);
53114a40ffcSTejun Heo p->flags |= PF_NO_SETAFFINITY;
53225834c73SPeter Zijlstra raw_spin_unlock_irqrestore(&p->pi_lock, flags);
53325834c73SPeter Zijlstra }
53425834c73SPeter Zijlstra
5352f064a59SPeter Zijlstra static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
53625834c73SPeter Zijlstra {
53725834c73SPeter Zijlstra __kthread_bind_mask(p, cpumask_of(cpu), state);
53825834c73SPeter Zijlstra }
53925834c73SPeter Zijlstra
54025834c73SPeter Zijlstra void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
54125834c73SPeter Zijlstra {
54225834c73SPeter Zijlstra __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
5432a1d4460SThomas Gleixner }
5442a1d4460SThomas Gleixner
5459e37bd30SRandy Dunlap /**
546881232b7SPeter Zijlstra * kthread_bind - bind a just-created kthread to a cpu.
547881232b7SPeter Zijlstra * @p: thread created by kthread_create().
548881232b7SPeter Zijlstra * @cpu: cpu (might not be online, must be possible) for @p to run on.
549881232b7SPeter Zijlstra *
550881232b7SPeter Zijlstra * Description: This function is equivalent to set_cpus_allowed(),
551881232b7SPeter Zijlstra * except that @cpu doesn't need to be online, and the thread must be
552881232b7SPeter Zijlstra * stopped (i.e., just returned from kthread_create()).
553881232b7SPeter Zijlstra */
554881232b7SPeter Zijlstra void kthread_bind(struct task_struct *p, unsigned int cpu)
555881232b7SPeter Zijlstra {
556f2530dc7SThomas Gleixner __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
557881232b7SPeter Zijlstra }
558881232b7SPeter Zijlstra EXPORT_SYMBOL(kthread_bind);
559881232b7SPeter Zijlstra
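/*
 * Illustrative sketch, not part of the upstream file: bind a just-created,
 * still sleeping thread to a CPU before the first wakeup.  my_percpu_fn is
 * hypothetical.
 *
 *    tsk = kthread_create(my_percpu_fn, NULL, "myhelper/%u", cpu);
 *    if (!IS_ERR(tsk)) {
 *        kthread_bind(tsk, cpu);
 *        wake_up_process(tsk);
 *    }
 *
 * kthread_create_on_cpu() below combines creation, NUMA placement and this
 * initial binding for the common per-CPU case.
 */
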
560881232b7SPeter Zijlstra /**
5612a1d4460SThomas Gleixner * kthread_create_on_cpu - Create a cpu bound kthread
5622a1d4460SThomas Gleixner * @threadfn: the function to run until signal_pending(current).
5632a1d4460SThomas Gleixner * @data: data ptr for @threadfn.
5642a1d4460SThomas Gleixner * @cpu: The cpu on which the thread should be bound,
5652a1d4460SThomas Gleixner * @namefmt: printf-style name for the thread. Format is restricted
5662a1d4460SThomas Gleixner * to "name.*%u". Code fills in cpu number.
5672a1d4460SThomas Gleixner *
5682a1d4460SThomas Gleixner * Description: This helper function creates and names a kernel thread
5692a1d4460SThomas Gleixner */
5702a1d4460SThomas Gleixner struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
5712a1d4460SThomas Gleixner void *data, unsigned int cpu,
5722a1d4460SThomas Gleixner const char *namefmt)
5732a1d4460SThomas Gleixner {
5742a1d4460SThomas Gleixner struct task_struct *p;
5752a1d4460SThomas Gleixner
57610922838SNishanth Aravamudan p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
5772a1d4460SThomas Gleixner cpu);
5782a1d4460SThomas Gleixner if (IS_ERR(p))
5792a1d4460SThomas Gleixner return p;
580a65d4096SPetr Mladek kthread_bind(p, cpu);
581a65d4096SPetr Mladek /* CPU hotplug need to bind once again when unparking the thread. */
5822a1d4460SThomas Gleixner to_kthread(p)->cpu = cpu;
5832a1d4460SThomas Gleixner return p;
5842a1d4460SThomas Gleixner }
585800977f6SCai Huoqing EXPORT_SYMBOL(kthread_create_on_cpu);
5862a1d4460SThomas Gleixner
587ac687e6eSPeter Zijlstra void kthread_set_per_cpu(struct task_struct *k, int cpu)
588ac687e6eSPeter Zijlstra {
589ac687e6eSPeter Zijlstra struct kthread *kthread = to_kthread(k);
590ac687e6eSPeter Zijlstra if (!kthread)
591ac687e6eSPeter Zijlstra return;
592ac687e6eSPeter Zijlstra
593ac687e6eSPeter Zijlstra WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
594ac687e6eSPeter Zijlstra
595ac687e6eSPeter Zijlstra if (cpu < 0) {
596ac687e6eSPeter Zijlstra clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
597ac687e6eSPeter Zijlstra return;
598ac687e6eSPeter Zijlstra }
599ac687e6eSPeter Zijlstra
600ac687e6eSPeter Zijlstra kthread->cpu = cpu;
601ac687e6eSPeter Zijlstra set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
602ac687e6eSPeter Zijlstra }
603ac687e6eSPeter Zijlstra
6043a7956e2SPeter Zijlstra bool kthread_is_per_cpu(struct task_struct *p)
605ac687e6eSPeter Zijlstra {
6063a7956e2SPeter Zijlstra struct kthread *kthread = __to_kthread(p);
607ac687e6eSPeter Zijlstra if (!kthread)
608ac687e6eSPeter Zijlstra return false;
609ac687e6eSPeter Zijlstra
610ac687e6eSPeter Zijlstra return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
611ac687e6eSPeter Zijlstra }
612ac687e6eSPeter Zijlstra
613cf380a4aSOleg Nesterov /**
614cf380a4aSOleg Nesterov * kthread_unpark - unpark a thread created by kthread_create().
615cf380a4aSOleg Nesterov * @k: thread created by kthread_create().
616cf380a4aSOleg Nesterov *
617cf380a4aSOleg Nesterov * Sets kthread_should_park() for @k to return false, wakes it, and
618cf380a4aSOleg Nesterov * waits for it to return. If the thread is marked percpu then it is
619cf380a4aSOleg Nesterov * bound to the cpu again.
620cf380a4aSOleg Nesterov */
621cf380a4aSOleg Nesterov void kthread_unpark(struct task_struct *k)
622f2530dc7SThomas Gleixner {
623cf380a4aSOleg Nesterov struct kthread *kthread = to_kthread(k);
624cf380a4aSOleg Nesterov
625*19a50299SFrederic Weisbecker if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
626*19a50299SFrederic Weisbecker return;
627a65d4096SPetr Mladek /*
628a65d4096SPetr Mladek * Newly created kthread was parked when the CPU was offline.
629a65d4096SPetr Mladek * The binding was lost and we need to set it again.
630a65d4096SPetr Mladek */
631f2530dc7SThomas Gleixner if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
632f2530dc7SThomas Gleixner __kthread_bind(k, kthread->cpu, TASK_PARKED);
63385f1abe0SPeter Zijlstra
63485f1abe0SPeter Zijlstra clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
6351cef1150SPeter Zijlstra /*
6361cef1150SPeter Zijlstra * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
6371cef1150SPeter Zijlstra */
638f2530dc7SThomas Gleixner wake_up_state(k, TASK_PARKED);
639f2530dc7SThomas Gleixner }
64018896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_unpark);
6412a1d4460SThomas Gleixner
6422a1d4460SThomas Gleixner /**
6432a1d4460SThomas Gleixner * kthread_park - park a thread created by kthread_create().
6442a1d4460SThomas Gleixner * @k: thread created by kthread_create().
6452a1d4460SThomas Gleixner *
6462a1d4460SThomas Gleixner * Sets kthread_should_park() for @k to return true, wakes it, and
6472a1d4460SThomas Gleixner * waits for it to return. This can also be called after kthread_create()
6482a1d4460SThomas Gleixner * instead of calling wake_up_process(): the thread will park without
6492a1d4460SThomas Gleixner * calling threadfn().
6502a1d4460SThomas Gleixner *
6512a1d4460SThomas Gleixner * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
6522a1d4460SThomas Gleixner * If called by the kthread itself just the park bit is set.
6532a1d4460SThomas Gleixner */
6542a1d4460SThomas Gleixner int kthread_park(struct task_struct *k)
6552a1d4460SThomas Gleixner {
656cf380a4aSOleg Nesterov struct kthread *kthread = to_kthread(k);
6572a1d4460SThomas Gleixner
658cf380a4aSOleg Nesterov if (WARN_ON(k->flags & PF_EXITING))
659cf380a4aSOleg Nesterov return -ENOSYS;
660cf380a4aSOleg Nesterov
661f83ee19bSPeter Zijlstra if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
662f83ee19bSPeter Zijlstra return -EBUSY;
663f83ee19bSPeter Zijlstra
6642a1d4460SThomas Gleixner set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
6652a1d4460SThomas Gleixner if (k != current) {
6662a1d4460SThomas Gleixner wake_up_process(k);
6671cef1150SPeter Zijlstra /*
6681cef1150SPeter Zijlstra * Wait for __kthread_parkme() to complete(), this means we
6691cef1150SPeter Zijlstra * _will_ have TASK_PARKED and are about to call schedule().
6701cef1150SPeter Zijlstra */
6712a1d4460SThomas Gleixner wait_for_completion(&kthread->parked);
6721cef1150SPeter Zijlstra /*
6731cef1150SPeter Zijlstra * Now wait for that schedule() to complete and the task to
6741cef1150SPeter Zijlstra * get scheduled out.
6751cef1150SPeter Zijlstra */
6761cef1150SPeter Zijlstra WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
6772a1d4460SThomas Gleixner }
678cf380a4aSOleg Nesterov
679cf380a4aSOleg Nesterov return 0;
6802a1d4460SThomas Gleixner }
68118896451SDavid Kershner EXPORT_SYMBOL_GPL(kthread_park);
6822a1d4460SThomas Gleixner
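/*
 * Illustrative sketch, not part of the upstream file: a controller
 * quiescing and resuming a thread around a reconfiguration, similar to
 * what the smpboot/CPU-hotplug code does with per-CPU threads.
 * my_reconfigure() is hypothetical.
 *
 *    if (!kthread_park(tsk)) {
 *        my_reconfigure();
 *        kthread_unpark(tsk);
 *    }
 *
 * While parked, the thread sits in TASK_PARKED inside __kthread_parkme(),
 * so the controller knows it is not executing its thread function.
 */
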
6832a1d4460SThomas Gleixner /**
6849e37bd30SRandy Dunlap * kthread_stop - stop a thread created by kthread_create().
6859e37bd30SRandy Dunlap * @k: thread created by kthread_create().
6869e37bd30SRandy Dunlap *
6879e37bd30SRandy Dunlap * Sets kthread_should_stop() for @k to return true, wakes it, and
6889ae26027SOleg Nesterov * waits for it to exit. This can also be called after kthread_create()
6899ae26027SOleg Nesterov * instead of calling wake_up_process(): the thread will exit without
6909ae26027SOleg Nesterov * calling threadfn().
6919ae26027SOleg Nesterov *
692bbda86e9SEric W. Biederman * If threadfn() may call kthread_exit() itself, the caller must ensure
6939ae26027SOleg Nesterov * task_struct can't go away.
6949e37bd30SRandy Dunlap *
6959e37bd30SRandy Dunlap * Returns the result of threadfn(), or %-EINTR if wake_up_process()
6969e37bd30SRandy Dunlap * was never called.
6979e37bd30SRandy Dunlap */
6981da177e4SLinus Torvalds int kthread_stop(struct task_struct *k)
6991da177e4SLinus Torvalds {
700b5c5442bSOleg Nesterov struct kthread *kthread;
7011da177e4SLinus Torvalds int ret;
7021da177e4SLinus Torvalds
70363706172SOleg Nesterov trace_sched_kthread_stop(k);
704b5c5442bSOleg Nesterov
705b5c5442bSOleg Nesterov get_task_struct(k);
706efb29fbfSOleg Nesterov kthread = to_kthread(k);
7072a1d4460SThomas Gleixner set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
708cf380a4aSOleg Nesterov kthread_unpark(k);
709a7c01fa9SJason A. Donenfeld set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
7101da177e4SLinus Torvalds wake_up_process(k);
71163706172SOleg Nesterov wait_for_completion(&kthread->exited);
7126b124879SEric W. Biederman ret = kthread->result;
7131da177e4SLinus Torvalds put_task_struct(k);
7140a16b607SMathieu Desnoyers
715b5c5442bSOleg Nesterov trace_sched_kthread_stop_ret(ret);
7161da177e4SLinus Torvalds return ret;
7171da177e4SLinus Torvalds }
71852e92e57SAdrian Bunk EXPORT_SYMBOL(kthread_stop);
7191da177e4SLinus Torvalds
720a9da6ddaSAndreas Gruenbacher /**
721a9da6ddaSAndreas Gruenbacher * kthread_stop_put - stop a thread and put its task struct
722a9da6ddaSAndreas Gruenbacher * @k: thread created by kthread_create().
723a9da6ddaSAndreas Gruenbacher *
724a9da6ddaSAndreas Gruenbacher * Stops a thread created by kthread_create() and put its task_struct.
725a9da6ddaSAndreas Gruenbacher * Only use when holding an extra task struct reference obtained by
726a9da6ddaSAndreas Gruenbacher * calling get_task_struct().
727a9da6ddaSAndreas Gruenbacher */
728a9da6ddaSAndreas Gruenbacher int kthread_stop_put(struct task_struct *k)
729a9da6ddaSAndreas Gruenbacher {
730a9da6ddaSAndreas Gruenbacher int ret;
731a9da6ddaSAndreas Gruenbacher
732a9da6ddaSAndreas Gruenbacher ret = kthread_stop(k);
733a9da6ddaSAndreas Gruenbacher put_task_struct(k);
734a9da6ddaSAndreas Gruenbacher return ret;
735a9da6ddaSAndreas Gruenbacher }
736a9da6ddaSAndreas Gruenbacher EXPORT_SYMBOL(kthread_stop_put);
737a9da6ddaSAndreas Gruenbacher
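/*
 * Illustrative sketch, not part of the upstream file: when the thread may
 * call kthread_exit() on its own, pin its task_struct with an extra
 * reference and drop it together with the stop.  my_fn and data are
 * hypothetical.
 *
 *    tsk = kthread_run(my_fn, data, "my_thread");
 *    if (IS_ERR(tsk))
 *        return PTR_ERR(tsk);
 *    get_task_struct(tsk);
 *    ...
 *    ret = kthread_stop_put(tsk);
 */
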
738e804a4a4SSatyam Sharma int kthreadd(void *unused)
7391da177e4SLinus Torvalds {
74073c27992SEric W. Biederman struct task_struct *tsk = current;
74173c27992SEric W. Biederman
742e804a4a4SSatyam Sharma /* Setup a clean context for our children to inherit. */
74373c27992SEric W. Biederman set_task_comm(tsk, "kthreadd");
74410ab825bSOleg Nesterov ignore_signals(tsk);
74504d4e665SFrederic Weisbecker set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
746aee4faa4SLai Jiangshan set_mems_allowed(node_states[N_MEMORY]);
74773c27992SEric W. Biederman
74834b087e4STejun Heo current->flags |= PF_NOFREEZE;
74977f88796STejun Heo cgroup_init_kthreadd();
75073c27992SEric W. Biederman
75173c27992SEric W. Biederman for (;;) {
75273c27992SEric W. Biederman set_current_state(TASK_INTERRUPTIBLE);
75373c27992SEric W. Biederman if (list_empty(&kthread_create_list))
75473c27992SEric W. Biederman schedule();
75573c27992SEric W. Biederman __set_current_state(TASK_RUNNING);
75673c27992SEric W. Biederman
75773c27992SEric W. Biederman spin_lock(&kthread_create_lock);
75873c27992SEric W. Biederman while (!list_empty(&kthread_create_list)) {
75973c27992SEric W. Biederman struct kthread_create_info *create;
76073c27992SEric W. Biederman
76173c27992SEric W. Biederman create = list_entry(kthread_create_list.next,
76273c27992SEric W. Biederman struct kthread_create_info, list);
76373c27992SEric W. Biederman list_del_init(&create->list);
76473c27992SEric W. Biederman spin_unlock(&kthread_create_lock);
76573c27992SEric W. Biederman
76673c27992SEric W. Biederman create_kthread(create);
76773c27992SEric W. Biederman
76873c27992SEric W. Biederman spin_lock(&kthread_create_lock);
76973c27992SEric W. Biederman }
77073c27992SEric W. Biederman spin_unlock(&kthread_create_lock);
77173c27992SEric W. Biederman }
7721da177e4SLinus Torvalds
7731da177e4SLinus Torvalds return 0;
7741da177e4SLinus Torvalds }
775b56c0d89STejun Heo
7763989144fSPetr Mladek void __kthread_init_worker(struct kthread_worker *worker,
7774f32e9b1SYong Zhang const char *name,
7784f32e9b1SYong Zhang struct lock_class_key *key)
7794f32e9b1SYong Zhang {
780dbf52682SPetr Mladek memset(worker, 0, sizeof(struct kthread_worker));
781fe99a4f4SJulia Cartwright raw_spin_lock_init(&worker->lock);
7824f32e9b1SYong Zhang lockdep_set_class_and_name(&worker->lock, key, name);
7834f32e9b1SYong Zhang INIT_LIST_HEAD(&worker->work_list);
78422597dc3SPetr Mladek INIT_LIST_HEAD(&worker->delayed_work_list);
7854f32e9b1SYong Zhang }
7863989144fSPetr Mladek EXPORT_SYMBOL_GPL(__kthread_init_worker);
7874f32e9b1SYong Zhang
788b56c0d89STejun Heo /**
789b56c0d89STejun Heo * kthread_worker_fn - kthread function to process kthread_worker
790b56c0d89STejun Heo * @worker_ptr: pointer to initialized kthread_worker
791b56c0d89STejun Heo *
792fbae2d44SPetr Mladek * This function implements the main cycle of kthread worker. It processes
793fbae2d44SPetr Mladek * work_list until it is stopped with kthread_stop(). It sleeps when the queue
794fbae2d44SPetr Mladek * is empty.
795b56c0d89STejun Heo *
796fbae2d44SPetr Mladek  * The works must not hold any locks or leave preemption or interrupts
797fbae2d44SPetr Mladek  * disabled when they finish. A safe point for freezing is reached after
798fbae2d44SPetr Mladek  * one work finishes and before a new one is started.
7998197b3d4SPetr Mladek *
8008197b3d4SPetr Mladek * Also the works must not be handled by more than one worker at the same time,
8018197b3d4SPetr Mladek * see also kthread_queue_work().
802b56c0d89STejun Heo */
803b56c0d89STejun Heo int kthread_worker_fn(void *worker_ptr)
804b56c0d89STejun Heo {
805b56c0d89STejun Heo struct kthread_worker *worker = worker_ptr;
806b56c0d89STejun Heo struct kthread_work *work;
807b56c0d89STejun Heo
808fbae2d44SPetr Mladek /*
809fbae2d44SPetr Mladek * FIXME: Update the check and remove the assignment when all kthread
810fbae2d44SPetr Mladek * worker users are created using kthread_create_worker*() functions.
811fbae2d44SPetr Mladek */
812fbae2d44SPetr Mladek WARN_ON(worker->task && worker->task != current);
813b56c0d89STejun Heo worker->task = current;
814dbf52682SPetr Mladek
815dbf52682SPetr Mladek if (worker->flags & KTW_FREEZABLE)
816dbf52682SPetr Mladek set_freezable();
817dbf52682SPetr Mladek
818b56c0d89STejun Heo repeat:
819b56c0d89STejun Heo set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
820b56c0d89STejun Heo
821b56c0d89STejun Heo if (kthread_should_stop()) {
822b56c0d89STejun Heo __set_current_state(TASK_RUNNING);
823fe99a4f4SJulia Cartwright raw_spin_lock_irq(&worker->lock);
824b56c0d89STejun Heo worker->task = NULL;
825fe99a4f4SJulia Cartwright raw_spin_unlock_irq(&worker->lock);
826b56c0d89STejun Heo return 0;
827b56c0d89STejun Heo }
828b56c0d89STejun Heo
829b56c0d89STejun Heo work = NULL;
830fe99a4f4SJulia Cartwright raw_spin_lock_irq(&worker->lock);
831b56c0d89STejun Heo if (!list_empty(&worker->work_list)) {
832b56c0d89STejun Heo work = list_first_entry(&worker->work_list,
833b56c0d89STejun Heo struct kthread_work, node);
834b56c0d89STejun Heo list_del_init(&work->node);
835b56c0d89STejun Heo }
83646f3d976STejun Heo worker->current_work = work;
837fe99a4f4SJulia Cartwright raw_spin_unlock_irq(&worker->lock);
838b56c0d89STejun Heo
839b56c0d89STejun Heo if (work) {
840f630c7c6SRob Clark kthread_work_func_t func = work->func;
841b56c0d89STejun Heo __set_current_state(TASK_RUNNING);
842f630c7c6SRob Clark trace_sched_kthread_work_execute_start(work);
843b56c0d89STejun Heo work->func(work);
844f630c7c6SRob Clark /*
845f630c7c6SRob Clark * Avoid dereferencing work after this point. The trace
846f630c7c6SRob Clark * event only cares about the address.
847f630c7c6SRob Clark */
848f630c7c6SRob Clark trace_sched_kthread_work_execute_end(work, func);
849cfd257f5SChen Yu } else if (!freezing(current)) {
850b56c0d89STejun Heo schedule();
851cfd257f5SChen Yu } else {
852cfd257f5SChen Yu /*
853cfd257f5SChen Yu  * Handle the case where the current task remains
854cfd257f5SChen Yu  * TASK_INTERRUPTIBLE. try_to_freeze() expects
855cfd257f5SChen Yu  * the current task to be TASK_RUNNING.
856cfd257f5SChen Yu */
857cfd257f5SChen Yu __set_current_state(TASK_RUNNING);
858cfd257f5SChen Yu }
859b56c0d89STejun Heo
860b56c0d89STejun Heo try_to_freeze();
86122cf8bc6SShaohua Li cond_resched();
862b56c0d89STejun Heo goto repeat;
863b56c0d89STejun Heo }
864b56c0d89STejun Heo EXPORT_SYMBOL_GPL(kthread_worker_fn);
865b56c0d89STejun Heo
866c0b942a7SNicolas Iooss static __printf(3, 0) struct kthread_worker *
867dbf52682SPetr Mladek __kthread_create_worker(int cpu, unsigned int flags,
868dbf52682SPetr Mladek const char namefmt[], va_list args)
869fbae2d44SPetr Mladek {
870fbae2d44SPetr Mladek struct kthread_worker *worker;
871fbae2d44SPetr Mladek struct task_struct *task;
87298fa15f3SAnshuman Khandual int node = NUMA_NO_NODE;
873fbae2d44SPetr Mladek
874fbae2d44SPetr Mladek worker = kzalloc(sizeof(*worker), GFP_KERNEL);
875fbae2d44SPetr Mladek if (!worker)
876fbae2d44SPetr Mladek return ERR_PTR(-ENOMEM);
877fbae2d44SPetr Mladek
878fbae2d44SPetr Mladek kthread_init_worker(worker);
879fbae2d44SPetr Mladek
8808fb9dcbdSOleg Nesterov if (cpu >= 0)
8818fb9dcbdSOleg Nesterov node = cpu_to_node(cpu);
882fbae2d44SPetr Mladek
883fbae2d44SPetr Mladek task = __kthread_create_on_node(kthread_worker_fn, worker,
8848fb9dcbdSOleg Nesterov node, namefmt, args);
885fbae2d44SPetr Mladek if (IS_ERR(task))
886fbae2d44SPetr Mladek goto fail_task;
887fbae2d44SPetr Mladek
8888fb9dcbdSOleg Nesterov if (cpu >= 0)
8898fb9dcbdSOleg Nesterov kthread_bind(task, cpu);
8908fb9dcbdSOleg Nesterov
891dbf52682SPetr Mladek worker->flags = flags;
892fbae2d44SPetr Mladek worker->task = task;
893fbae2d44SPetr Mladek wake_up_process(task);
894fbae2d44SPetr Mladek return worker;
895fbae2d44SPetr Mladek
896fbae2d44SPetr Mladek fail_task:
897fbae2d44SPetr Mladek kfree(worker);
898fbae2d44SPetr Mladek return ERR_CAST(task);
899fbae2d44SPetr Mladek }
900fbae2d44SPetr Mladek
901fbae2d44SPetr Mladek /**
902fbae2d44SPetr Mladek * kthread_create_worker - create a kthread worker
903dbf52682SPetr Mladek * @flags: flags modifying the default behavior of the worker
904fbae2d44SPetr Mladek * @namefmt: printf-style name for the kthread worker (task).
905fbae2d44SPetr Mladek *
906fbae2d44SPetr Mladek * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
907fbae2d44SPetr Mladek * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
908d25c83c6SPetr Mladek * when the caller was killed by a fatal signal.
909fbae2d44SPetr Mladek */
910fbae2d44SPetr Mladek struct kthread_worker *
911dbf52682SPetr Mladek kthread_create_worker(unsigned int flags, const char namefmt[], ...)
912fbae2d44SPetr Mladek {
913fbae2d44SPetr Mladek struct kthread_worker *worker;
914fbae2d44SPetr Mladek va_list args;
915fbae2d44SPetr Mladek
916fbae2d44SPetr Mladek va_start(args, namefmt);
917dbf52682SPetr Mladek worker = __kthread_create_worker(-1, flags, namefmt, args);
918fbae2d44SPetr Mladek va_end(args);
919fbae2d44SPetr Mladek
920fbae2d44SPetr Mladek return worker;
921fbae2d44SPetr Mladek }
922fbae2d44SPetr Mladek EXPORT_SYMBOL(kthread_create_worker);
923fbae2d44SPetr Mladek
924fbae2d44SPetr Mladek /**
925fbae2d44SPetr Mladek * kthread_create_worker_on_cpu - create a kthread worker and bind it
9267b7b8a2cSRandy Dunlap * to a given CPU and the associated NUMA node.
927fbae2d44SPetr Mladek * @cpu: CPU number
928dbf52682SPetr Mladek * @flags: flags modifying the default behavior of the worker
929fbae2d44SPetr Mladek * @namefmt: printf-style name for the kthread worker (task).
930fbae2d44SPetr Mladek *
931fbae2d44SPetr Mladek * Use a valid CPU number if you want to bind the kthread worker
932fbae2d44SPetr Mladek * to the given CPU and the associated NUMA node.
933fbae2d44SPetr Mladek *
934fbae2d44SPetr Mladek * A good practice is to add the cpu number also into the worker name.
935fbae2d44SPetr Mladek * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
936fbae2d44SPetr Mladek *
937ebb2bdceSPetr Mladek * CPU hotplug:
938ebb2bdceSPetr Mladek * The kthread worker API is simple and generic. It just provides a way
939ebb2bdceSPetr Mladek * to create, use, and destroy workers.
940ebb2bdceSPetr Mladek *
941ebb2bdceSPetr Mladek * It is up to the API user how to handle CPU hotplug. They have to decide
942ebb2bdceSPetr Mladek * how to handle pending work items, prevent queuing new ones, and
943ebb2bdceSPetr Mladek * restore the functionality when the CPU goes off and on. There are a
944ebb2bdceSPetr Mladek * few catches:
945ebb2bdceSPetr Mladek *
946ebb2bdceSPetr Mladek * - CPU affinity gets lost when it is scheduled on an offline CPU.
947ebb2bdceSPetr Mladek *
948ebb2bdceSPetr Mladek * - The worker might not exist when the CPU was off when the user
949ebb2bdceSPetr Mladek * created the workers.
950ebb2bdceSPetr Mladek *
951ebb2bdceSPetr Mladek * Good practice is to implement two CPU hotplug callbacks and to
952ebb2bdceSPetr Mladek * destroy/create the worker when the CPU goes down/up.
953ebb2bdceSPetr Mladek *
954ebb2bdceSPetr Mladek * Return:
955ebb2bdceSPetr Mladek * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
956fbae2d44SPetr Mladek * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
957d25c83c6SPetr Mladek * when the caller was killed by a fatal signal.
958fbae2d44SPetr Mladek */
959fbae2d44SPetr Mladek struct kthread_worker *
960dbf52682SPetr Mladek kthread_create_worker_on_cpu(int cpu, unsigned int flags,
961dbf52682SPetr Mladek const char namefmt[], ...)
962fbae2d44SPetr Mladek {
963fbae2d44SPetr Mladek struct kthread_worker *worker;
964fbae2d44SPetr Mladek va_list args;
965fbae2d44SPetr Mladek
966fbae2d44SPetr Mladek va_start(args, namefmt);
967dbf52682SPetr Mladek worker = __kthread_create_worker(cpu, flags, namefmt, args);
968fbae2d44SPetr Mladek va_end(args);
969fbae2d44SPetr Mladek
970fbae2d44SPetr Mladek return worker;
971fbae2d44SPetr Mladek }
972fbae2d44SPetr Mladek EXPORT_SYMBOL(kthread_create_worker_on_cpu);
973fbae2d44SPetr Mladek
97437be45d4SPetr Mladek /*
97537be45d4SPetr Mladek * Returns true when the work could not be queued at the moment.
97637be45d4SPetr Mladek * It happens when it is already pending in a worker list
97737be45d4SPetr Mladek * or when it is being cancelled.
97837be45d4SPetr Mladek */
97937be45d4SPetr Mladek static inline bool queuing_blocked(struct kthread_worker *worker,
98037be45d4SPetr Mladek struct kthread_work *work)
98137be45d4SPetr Mladek {
98237be45d4SPetr Mladek lockdep_assert_held(&worker->lock);
98337be45d4SPetr Mladek
98437be45d4SPetr Mladek return !list_empty(&work->node) || work->canceling;
98537be45d4SPetr Mladek }
98637be45d4SPetr Mladek
9878197b3d4SPetr Mladek static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
9888197b3d4SPetr Mladek struct kthread_work *work)
9898197b3d4SPetr Mladek {
9908197b3d4SPetr Mladek lockdep_assert_held(&worker->lock);
9918197b3d4SPetr Mladek WARN_ON_ONCE(!list_empty(&work->node));
9928197b3d4SPetr Mladek /* Do not use a work with >1 worker, see kthread_queue_work() */
9938197b3d4SPetr Mladek WARN_ON_ONCE(work->worker && work->worker != worker);
9948197b3d4SPetr Mladek }
9958197b3d4SPetr Mladek
9969a2e03d8STejun Heo /* insert @work before @pos in @worker */
9973989144fSPetr Mladek static void kthread_insert_work(struct kthread_worker *worker,
9989a2e03d8STejun Heo struct kthread_work *work,
9999a2e03d8STejun Heo struct list_head *pos)
10009a2e03d8STejun Heo {
10018197b3d4SPetr Mladek kthread_insert_work_sanity_check(worker, work);
10029a2e03d8STejun Heo
1003f630c7c6SRob Clark trace_sched_kthread_work_queue_work(worker, work);
1004f630c7c6SRob Clark
10059a2e03d8STejun Heo list_add_tail(&work->node, pos);
100646f3d976STejun Heo work->worker = worker;
1007ed1403ecSLai Jiangshan if (!worker->current_work && likely(worker->task))
10089a2e03d8STejun Heo wake_up_process(worker->task);
10099a2e03d8STejun Heo }
10109a2e03d8STejun Heo
1011b56c0d89STejun Heo /**
10123989144fSPetr Mladek * kthread_queue_work - queue a kthread_work
1013b56c0d89STejun Heo * @worker: target kthread_worker
1014b56c0d89STejun Heo * @work: kthread_work to queue
1015b56c0d89STejun Heo *
1016b56c0d89STejun Heo  * Queue @work for async execution on @worker. @worker must have been
1017b56c0d89STejun Heo  * created with kthread_create_worker(). Returns %true
1018b56c0d89STejun Heo * if @work was successfully queued, %false if it was already pending.
10198197b3d4SPetr Mladek *
10208197b3d4SPetr Mladek * Reinitialize the work if it needs to be used by another worker.
10218197b3d4SPetr Mladek * For example, when the worker was stopped and started again.
1022b56c0d89STejun Heo */
10233989144fSPetr Mladek bool kthread_queue_work(struct kthread_worker *worker,
1024b56c0d89STejun Heo struct kthread_work *work)
1025b56c0d89STejun Heo {
1026b56c0d89STejun Heo bool ret = false;
1027b56c0d89STejun Heo unsigned long flags;
1028b56c0d89STejun Heo
1029fe99a4f4SJulia Cartwright raw_spin_lock_irqsave(&worker->lock, flags);
103037be45d4SPetr Mladek if (!queuing_blocked(worker, work)) {
10313989144fSPetr Mladek kthread_insert_work(worker, work, &worker->work_list);
1032b56c0d89STejun Heo ret = true;
1033b56c0d89STejun Heo }
1034fe99a4f4SJulia Cartwright raw_spin_unlock_irqrestore(&worker->lock, flags);
1035b56c0d89STejun Heo return ret;
1036b56c0d89STejun Heo }
10373989144fSPetr Mladek EXPORT_SYMBOL_GPL(kthread_queue_work);
1038b56c0d89STejun Heo
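/*
 * Illustrative sketch, not part of the upstream file: the basic
 * kthread_worker pattern.  struct my_ctx, my_work_fn(), process() and the
 * "my_worker" name are hypothetical.
 *
 *    struct my_ctx {
 *        struct kthread_work work;
 *        int value;
 *    };
 *
 *    static void my_work_fn(struct kthread_work *work)
 *    {
 *        struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *        process(ctx->value);
 *    }
 *
 *    struct kthread_worker *worker;
 *
 *    worker = kthread_create_worker(0, "my_worker");
 *    if (IS_ERR(worker))
 *        return PTR_ERR(worker);
 *    kthread_init_work(&ctx->work, my_work_fn);
 *    kthread_queue_work(worker, &ctx->work);
 *    ...
 *    kthread_flush_worker(worker);
 *    kthread_destroy_worker(worker);
 */
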
103922597dc3SPetr Mladek /**
104022597dc3SPetr Mladek * kthread_delayed_work_timer_fn - callback that queues the associated kthread
104122597dc3SPetr Mladek * delayed work when the timer expires.
1042fe5c3b69SKees Cook * @t: pointer to the expired timer
104322597dc3SPetr Mladek *
104422597dc3SPetr Mladek * The format of the function is defined by struct timer_list.
104522597dc3SPetr Mladek * It should have been called from irqsafe timer with irq already off.
104622597dc3SPetr Mladek */
1047fe5c3b69SKees Cook void kthread_delayed_work_timer_fn(struct timer_list *t)
104822597dc3SPetr Mladek {
1049fe5c3b69SKees Cook struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
105022597dc3SPetr Mladek struct kthread_work *work = &dwork->work;
105122597dc3SPetr Mladek struct kthread_worker *worker = work->worker;
1052ad01423aSSebastian Andrzej Siewior unsigned long flags;
105322597dc3SPetr Mladek
105422597dc3SPetr Mladek /*
105522597dc3SPetr Mladek * This might happen when a pending work is reinitialized.
105622597dc3SPetr Mladek * It means that it is being used incorrectly.
105722597dc3SPetr Mladek */
105822597dc3SPetr Mladek if (WARN_ON_ONCE(!worker))
105922597dc3SPetr Mladek return;
106022597dc3SPetr Mladek
1061ad01423aSSebastian Andrzej Siewior raw_spin_lock_irqsave(&worker->lock, flags);
106222597dc3SPetr Mladek /* Work must not be used with >1 worker, see kthread_queue_work(). */
106322597dc3SPetr Mladek WARN_ON_ONCE(work->worker != worker);
106422597dc3SPetr Mladek
106522597dc3SPetr Mladek /* Move the work from worker->delayed_work_list. */
106622597dc3SPetr Mladek WARN_ON_ONCE(list_empty(&work->node));
106722597dc3SPetr Mladek list_del_init(&work->node);
10686993d0fdSZqiang if (!work->canceling)
106922597dc3SPetr Mladek kthread_insert_work(worker, work, &worker->work_list);
107022597dc3SPetr Mladek
1071ad01423aSSebastian Andrzej Siewior raw_spin_unlock_irqrestore(&worker->lock, flags);
107222597dc3SPetr Mladek }
107322597dc3SPetr Mladek EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
107422597dc3SPetr Mladek
1075bc88f85cSBen Dooks static void __kthread_queue_delayed_work(struct kthread_worker *worker,
107622597dc3SPetr Mladek struct kthread_delayed_work *dwork,
107722597dc3SPetr Mladek unsigned long delay)
107822597dc3SPetr Mladek {
107922597dc3SPetr Mladek struct timer_list *timer = &dwork->timer;
108022597dc3SPetr Mladek struct kthread_work *work = &dwork->work;
108122597dc3SPetr Mladek
10824b243563SSami Tolvanen WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
108322597dc3SPetr Mladek
108422597dc3SPetr Mladek /*
108522597dc3SPetr Mladek * If @delay is 0, queue @dwork->work immediately. This is for
108622597dc3SPetr Mladek * both optimization and correctness. The earliest @timer can
108722597dc3SPetr Mladek * expire is on the next tick, and delayed_work users depend on
108822597dc3SPetr Mladek * there being no such delay when @delay is 0.
108922597dc3SPetr Mladek */
109022597dc3SPetr Mladek if (!delay) {
109122597dc3SPetr Mladek kthread_insert_work(worker, work, &worker->work_list);
109222597dc3SPetr Mladek return;
109322597dc3SPetr Mladek }
109422597dc3SPetr Mladek
109522597dc3SPetr Mladek /* Be paranoid and try to detect possible races already now. */
109622597dc3SPetr Mladek kthread_insert_work_sanity_check(worker, work);
109722597dc3SPetr Mladek
109822597dc3SPetr Mladek list_add(&work->node, &worker->delayed_work_list);
109922597dc3SPetr Mladek work->worker = worker;
110022597dc3SPetr Mladek timer->expires = jiffies + delay;
110122597dc3SPetr Mladek add_timer(timer);
110222597dc3SPetr Mladek }
110322597dc3SPetr Mladek
110422597dc3SPetr Mladek /**
110522597dc3SPetr Mladek * kthread_queue_delayed_work - queue the associated kthread work
110622597dc3SPetr Mladek * after a delay.
110722597dc3SPetr Mladek * @worker: target kthread_worker
110822597dc3SPetr Mladek * @dwork: kthread_delayed_work to queue
110922597dc3SPetr Mladek * @delay: number of jiffies to wait before queuing
111022597dc3SPetr Mladek *
111122597dc3SPetr Mladek * If the work has not been pending it starts a timer that will queue
111222597dc3SPetr Mladek * the work after the given @delay. If @delay is zero, it queues the
111322597dc3SPetr Mladek * work immediately.
111422597dc3SPetr Mladek *
111522597dc3SPetr Mladek * Return: %false if @work was already pending, meaning that either
111622597dc3SPetr Mladek * the timer was running or the work was queued. Returns %true
111722597dc3SPetr Mladek * otherwise.
111822597dc3SPetr Mladek */
111922597dc3SPetr Mladek bool kthread_queue_delayed_work(struct kthread_worker *worker,
112022597dc3SPetr Mladek struct kthread_delayed_work *dwork,
112122597dc3SPetr Mladek unsigned long delay)
112222597dc3SPetr Mladek {
112322597dc3SPetr Mladek struct kthread_work *work = &dwork->work;
112422597dc3SPetr Mladek unsigned long flags;
112522597dc3SPetr Mladek bool ret = false;
112622597dc3SPetr Mladek
1127fe99a4f4SJulia Cartwright raw_spin_lock_irqsave(&worker->lock, flags);
112822597dc3SPetr Mladek
112937be45d4SPetr Mladek if (!queuing_blocked(worker, work)) {
113022597dc3SPetr Mladek __kthread_queue_delayed_work(worker, dwork, delay);
113122597dc3SPetr Mladek ret = true;
113222597dc3SPetr Mladek }
113322597dc3SPetr Mladek
1134fe99a4f4SJulia Cartwright raw_spin_unlock_irqrestore(&worker->lock, flags);
113522597dc3SPetr Mladek return ret;
113622597dc3SPetr Mladek }
113722597dc3SPetr Mladek EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
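
/*
 * Illustrative sketch (not part of the original file): queuing delayed work
 * on an existing worker. my_worker, my_dwork and my_delayed_fn are
 * hypothetical; the work runs roughly 100ms later unless already pending.
 */
#if 0
static struct kthread_delayed_work my_dwork;

static void my_delayed_fn(struct kthread_work *work)
{
	/* Runs in the worker kthread after the timer expires. */
}

static void my_arm(struct kthread_worker *my_worker)
{
	kthread_init_delayed_work(&my_dwork, my_delayed_fn);
	/* Returns false if already pending (timer armed or work queued). */
	kthread_queue_delayed_work(my_worker, &my_dwork, msecs_to_jiffies(100));
}
#endif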
113822597dc3SPetr Mladek
11399a2e03d8STejun Heo struct kthread_flush_work {
11409a2e03d8STejun Heo struct kthread_work work;
11419a2e03d8STejun Heo struct completion done;
11429a2e03d8STejun Heo };
11439a2e03d8STejun Heo
11449a2e03d8STejun Heo static void kthread_flush_work_fn(struct kthread_work *work)
11459a2e03d8STejun Heo {
11469a2e03d8STejun Heo struct kthread_flush_work *fwork =
11479a2e03d8STejun Heo container_of(work, struct kthread_flush_work, work);
11489a2e03d8STejun Heo complete(&fwork->done);
11499a2e03d8STejun Heo }
11509a2e03d8STejun Heo
1151b56c0d89STejun Heo /**
11523989144fSPetr Mladek * kthread_flush_work - flush a kthread_work
1153b56c0d89STejun Heo * @work: work to flush
1154b56c0d89STejun Heo *
1155b56c0d89STejun Heo * If @work is queued or executing, wait for it to finish execution.
1156b56c0d89STejun Heo */
11573989144fSPetr Mladek void kthread_flush_work(struct kthread_work *work)
1158b56c0d89STejun Heo {
115946f3d976STejun Heo struct kthread_flush_work fwork = {
116046f3d976STejun Heo KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
116146f3d976STejun Heo COMPLETION_INITIALIZER_ONSTACK(fwork.done),
116246f3d976STejun Heo };
116346f3d976STejun Heo struct kthread_worker *worker;
116446f3d976STejun Heo bool noop = false;
1165b56c0d89STejun Heo
116646f3d976STejun Heo worker = work->worker;
116746f3d976STejun Heo if (!worker)
116846f3d976STejun Heo return;
1169b56c0d89STejun Heo
1170fe99a4f4SJulia Cartwright raw_spin_lock_irq(&worker->lock);
11718197b3d4SPetr Mladek /* Work must not be used with >1 worker, see kthread_queue_work(). */
11728197b3d4SPetr Mladek WARN_ON_ONCE(work->worker != worker);
1173b56c0d89STejun Heo
117446f3d976STejun Heo if (!list_empty(&work->node))
11753989144fSPetr Mladek kthread_insert_work(worker, &fwork.work, work->node.next);
117646f3d976STejun Heo else if (worker->current_work == work)
11773989144fSPetr Mladek kthread_insert_work(worker, &fwork.work,
11783989144fSPetr Mladek worker->work_list.next);
117946f3d976STejun Heo else
118046f3d976STejun Heo noop = true;
1181b56c0d89STejun Heo
1182fe99a4f4SJulia Cartwright raw_spin_unlock_irq(&worker->lock);
118346f3d976STejun Heo
118446f3d976STejun Heo if (!noop)
118546f3d976STejun Heo wait_for_completion(&fwork.done);
1186b56c0d89STejun Heo }
11873989144fSPetr Mladek EXPORT_SYMBOL_GPL(kthread_flush_work);
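
/*
 * Illustrative sketch (not part of the original file): waiting for a
 * previously queued work item to finish before freeing data it uses.
 * my_work and my_data are hypothetical; if the callback can re-queue
 * itself, kthread_cancel_work_sync() is needed instead of a plain flush.
 */
#if 0
static void my_quiesce(struct kthread_work *my_work, void *my_data)
{
	/* Wait until my_work is neither queued nor executing ... */
	kthread_flush_work(my_work);
	/* ... so my_data can no longer be touched by the callback. */
	kfree(my_data);
}
#endif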
1188b56c0d89STejun Heo
118937be45d4SPetr Mladek /*
119034b3d534SPetr Mladek * Make sure that the timer is neither set nor running, so that it can
119134b3d534SPetr Mladek * no longer manipulate the work's list_head.
119237be45d4SPetr Mladek *
119334b3d534SPetr Mladek * The function is called under worker->lock. The lock is temporarily
119434b3d534SPetr Mladek * released, but the timer can't be set again in the meantime.
119537be45d4SPetr Mladek */
119634b3d534SPetr Mladek static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
119737be45d4SPetr Mladek unsigned long *flags)
119837be45d4SPetr Mladek {
119937be45d4SPetr Mladek struct kthread_delayed_work *dwork =
120037be45d4SPetr Mladek container_of(work, struct kthread_delayed_work, work);
120137be45d4SPetr Mladek struct kthread_worker *worker = work->worker;
120237be45d4SPetr Mladek
120337be45d4SPetr Mladek /*
120437be45d4SPetr Mladek * del_timer_sync() must be called to make sure that the timer
120537be45d4SPetr Mladek * callback is not running. The lock must be temporarily released
120637be45d4SPetr Mladek * to avoid a deadlock with the callback. In the meantime,
120737be45d4SPetr Mladek * any queuing is blocked by setting the canceling counter.
120837be45d4SPetr Mladek */
120937be45d4SPetr Mladek work->canceling++;
1210fe99a4f4SJulia Cartwright raw_spin_unlock_irqrestore(&worker->lock, *flags);
121137be45d4SPetr Mladek del_timer_sync(&dwork->timer);
1212fe99a4f4SJulia Cartwright raw_spin_lock_irqsave(&worker->lock, *flags);
121337be45d4SPetr Mladek work->canceling--;
121437be45d4SPetr Mladek }
121537be45d4SPetr Mladek
121637be45d4SPetr Mladek /*
12175fa54346SPetr Mladek * This function removes the work from the worker queue.
12185fa54346SPetr Mladek *
12195fa54346SPetr Mladek * It is called under worker->lock. The caller must make sure that
12205fa54346SPetr Mladek * the timer used by delayed work is not running, e.g. by calling
12215fa54346SPetr Mladek * kthread_cancel_delayed_work_timer().
122234b3d534SPetr Mladek *
122334b3d534SPetr Mladek * The work might still be in use when this function finishes. See the
122434b3d534SPetr Mladek * current_work processed by the worker.
122534b3d534SPetr Mladek *
122634b3d534SPetr Mladek * Return: %true if @work was pending and successfully canceled,
122734b3d534SPetr Mladek * %false if @work was not pending
122834b3d534SPetr Mladek */
12295fa54346SPetr Mladek static bool __kthread_cancel_work(struct kthread_work *work)
123034b3d534SPetr Mladek {
123134b3d534SPetr Mladek /*
123237be45d4SPetr Mladek * Try to remove the work from a worker list. It might either
123337be45d4SPetr Mladek * be from worker->work_list or from worker->delayed_work_list.
123437be45d4SPetr Mladek */
123537be45d4SPetr Mladek if (!list_empty(&work->node)) {
123637be45d4SPetr Mladek list_del_init(&work->node);
123737be45d4SPetr Mladek return true;
123837be45d4SPetr Mladek }
123937be45d4SPetr Mladek
124037be45d4SPetr Mladek return false;
124137be45d4SPetr Mladek }
124237be45d4SPetr Mladek
12439a6b06c8SPetr Mladek /**
12449a6b06c8SPetr Mladek * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
12459a6b06c8SPetr Mladek * @worker: kthread worker to use
12469a6b06c8SPetr Mladek * @dwork: kthread delayed work to queue
12479a6b06c8SPetr Mladek * @delay: number of jiffies to wait before queuing
12489a6b06c8SPetr Mladek *
12499a6b06c8SPetr Mladek * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
12509a6b06c8SPetr Mladek * modify @dwork's timer so that it expires after @delay. If @delay is zero,
12519a6b06c8SPetr Mladek * @work is guaranteed to be queued immediately.
12529a6b06c8SPetr Mladek *
1253d71ba164SPetr Mladek * Return: %false if @dwork was idle and queued, %true otherwise.
12549a6b06c8SPetr Mladek *
12559a6b06c8SPetr Mladek * A special case is when the work is being canceled in parallel.
12569a6b06c8SPetr Mladek * It might be caused either by the real kthread_cancel_delayed_work_sync()
12579a6b06c8SPetr Mladek * or yet another kthread_mod_delayed_work() call. We let the other command
1258d71ba164SPetr Mladek * win and return %true here. The return value can be used for reference
1259d71ba164SPetr Mladek * counting and the number of queued works stays the same. Anyway, the caller
1260d71ba164SPetr Mladek * is supposed to synchronize these operations in a reasonable way.
12619a6b06c8SPetr Mladek *
12629a6b06c8SPetr Mladek * This function is safe to call from any context including IRQ handler.
12639a6b06c8SPetr Mladek * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
12649a6b06c8SPetr Mladek * for details.
12659a6b06c8SPetr Mladek */
12669a6b06c8SPetr Mladek bool kthread_mod_delayed_work(struct kthread_worker *worker,
12679a6b06c8SPetr Mladek struct kthread_delayed_work *dwork,
12689a6b06c8SPetr Mladek unsigned long delay)
12699a6b06c8SPetr Mladek {
12709a6b06c8SPetr Mladek struct kthread_work *work = &dwork->work;
12719a6b06c8SPetr Mladek unsigned long flags;
1272d71ba164SPetr Mladek int ret;
12739a6b06c8SPetr Mladek
1274fe99a4f4SJulia Cartwright raw_spin_lock_irqsave(&worker->lock, flags);
12759a6b06c8SPetr Mladek
12769a6b06c8SPetr Mladek /* Do not bother with canceling when never queued. */
1277d71ba164SPetr Mladek if (!work->worker) {
1278d71ba164SPetr Mladek ret = false;
12799a6b06c8SPetr Mladek goto fast_queue;
1280d71ba164SPetr Mladek }
12819a6b06c8SPetr Mladek
12829a6b06c8SPetr Mladek /* Work must not be used with >1 worker, see kthread_queue_work() */
12839a6b06c8SPetr Mladek WARN_ON_ONCE(work->worker != worker);
12849a6b06c8SPetr Mladek
12855fa54346SPetr Mladek /*
12865fa54346SPetr Mladek * Temporarily cancel the work, but do not fight with another command
12875fa54346SPetr Mladek * that is canceling the work as well.
12885fa54346SPetr Mladek *
12895fa54346SPetr Mladek * It is a bit tricky because of possible races with another
12905fa54346SPetr Mladek * mod_delayed_work() and cancel_delayed_work() callers.
12915fa54346SPetr Mladek *
12925fa54346SPetr Mladek * The timer must be canceled first because worker->lock is released
12935fa54346SPetr Mladek * when doing so. But the work can be removed from the queue (list)
12945fa54346SPetr Mladek * only when it can be queued again so that the return value can
12955fa54346SPetr Mladek * be used for reference counting.
12965fa54346SPetr Mladek */
12975fa54346SPetr Mladek kthread_cancel_delayed_work_timer(work, &flags);
1298d71ba164SPetr Mladek if (work->canceling) {
1299d71ba164SPetr Mladek /* The number of works in the queue does not change. */
1300d71ba164SPetr Mladek ret = true;
13019a6b06c8SPetr Mladek goto out;
1302d71ba164SPetr Mladek }
13035fa54346SPetr Mladek ret = __kthread_cancel_work(work);
13049a6b06c8SPetr Mladek
13059a6b06c8SPetr Mladek fast_queue:
13069a6b06c8SPetr Mladek __kthread_queue_delayed_work(worker, dwork, delay);
13079a6b06c8SPetr Mladek out:
1308fe99a4f4SJulia Cartwright raw_spin_unlock_irqrestore(&worker->lock, flags);
13099a6b06c8SPetr Mladek return ret;
13109a6b06c8SPetr Mladek }
13119a6b06c8SPetr Mladek EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
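
/*
 * Illustrative sketch (not part of the original file): a debounce-style
 * rearm. Each call pushes the expiry out to one second from now, whether or
 * not the delayed work was already pending. The my_* names are hypothetical.
 */
#if 0
static void my_touch(struct kthread_worker *my_worker,
		     struct kthread_delayed_work *my_dwork)
{
	kthread_mod_delayed_work(my_worker, my_dwork, HZ);
}
#endif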
13129a6b06c8SPetr Mladek
131337be45d4SPetr Mladek static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
131437be45d4SPetr Mladek {
131537be45d4SPetr Mladek struct kthread_worker *worker = work->worker;
131637be45d4SPetr Mladek unsigned long flags;
131737be45d4SPetr Mladek int ret = false;
131837be45d4SPetr Mladek
131937be45d4SPetr Mladek if (!worker)
132037be45d4SPetr Mladek goto out;
132137be45d4SPetr Mladek
1322fe99a4f4SJulia Cartwright raw_spin_lock_irqsave(&worker->lock, flags);
132337be45d4SPetr Mladek /* Work must not be used with >1 worker, see kthread_queue_work(). */
132437be45d4SPetr Mladek WARN_ON_ONCE(work->worker != worker);
132537be45d4SPetr Mladek
13265fa54346SPetr Mladek if (is_dwork)
13275fa54346SPetr Mladek kthread_cancel_delayed_work_timer(work, &flags);
13285fa54346SPetr Mladek
13295fa54346SPetr Mladek ret = __kthread_cancel_work(work);
133037be45d4SPetr Mladek
133137be45d4SPetr Mladek if (worker->current_work != work)
133237be45d4SPetr Mladek goto out_fast;
133337be45d4SPetr Mladek
133437be45d4SPetr Mladek /*
133537be45d4SPetr Mladek * The work is in progress and we need to wait with the lock released.
133637be45d4SPetr Mladek * In the meantime, block any queuing by setting the canceling counter.
133737be45d4SPetr Mladek */
133837be45d4SPetr Mladek work->canceling++;
1339fe99a4f4SJulia Cartwright raw_spin_unlock_irqrestore(&worker->lock, flags);
134037be45d4SPetr Mladek kthread_flush_work(work);
1341fe99a4f4SJulia Cartwright raw_spin_lock_irqsave(&worker->lock, flags);
134237be45d4SPetr Mladek work->canceling--;
134337be45d4SPetr Mladek
134437be45d4SPetr Mladek out_fast:
1345fe99a4f4SJulia Cartwright raw_spin_unlock_irqrestore(&worker->lock, flags);
134637be45d4SPetr Mladek out:
134737be45d4SPetr Mladek return ret;
134837be45d4SPetr Mladek }
134937be45d4SPetr Mladek
135037be45d4SPetr Mladek /**
135137be45d4SPetr Mladek * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
135237be45d4SPetr Mladek * @work: the kthread work to cancel
135337be45d4SPetr Mladek *
135437be45d4SPetr Mladek * Cancel @work and wait for its execution to finish. This function
135537be45d4SPetr Mladek * can be used even if the work re-queues itself. On return from this
135637be45d4SPetr Mladek * function, @work is guaranteed to be not pending or executing on any CPU.
135737be45d4SPetr Mladek *
135837be45d4SPetr Mladek * kthread_cancel_work_sync(&delayed_work->work) must not be used for
135937be45d4SPetr Mladek * delayed works. Use kthread_cancel_delayed_work_sync() instead.
136037be45d4SPetr Mladek *
136137be45d4SPetr Mladek * The caller must ensure that the worker on which @work was last
136237be45d4SPetr Mladek * queued can't be destroyed before this function returns.
136337be45d4SPetr Mladek *
136437be45d4SPetr Mladek * Return: %true if @work was pending, %false otherwise.
136537be45d4SPetr Mladek */
136637be45d4SPetr Mladek bool kthread_cancel_work_sync(struct kthread_work *work)
136737be45d4SPetr Mladek {
136837be45d4SPetr Mladek return __kthread_cancel_work_sync(work, false);
136937be45d4SPetr Mladek }
137037be45d4SPetr Mladek EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
137137be45d4SPetr Mladek
137237be45d4SPetr Mladek /**
137337be45d4SPetr Mladek * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
137437be45d4SPetr Mladek * wait for it to finish.
137537be45d4SPetr Mladek * @dwork: the kthread delayed work to cancel
137637be45d4SPetr Mladek *
137737be45d4SPetr Mladek * This is kthread_cancel_work_sync() for delayed works.
137837be45d4SPetr Mladek *
137937be45d4SPetr Mladek * Return: %true if @dwork was pending, %false otherwise.
138037be45d4SPetr Mladek */
138137be45d4SPetr Mladek bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
138237be45d4SPetr Mladek {
138337be45d4SPetr Mladek return __kthread_cancel_work_sync(&dwork->work, true);
138437be45d4SPetr Mladek }
138537be45d4SPetr Mladek EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
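
/*
 * Illustrative sketch (not part of the original file): synchronous cancel
 * during shutdown. After these calls return, neither callback is pending or
 * executing, even if my_work used to re-queue itself. The my_* names are
 * hypothetical.
 */
#if 0
static void my_shutdown(struct kthread_work *my_work,
			struct kthread_delayed_work *my_dwork)
{
	kthread_cancel_work_sync(my_work);
	kthread_cancel_delayed_work_sync(my_dwork);
}
#endif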
138637be45d4SPetr Mladek
1387b56c0d89STejun Heo /**
13883989144fSPetr Mladek * kthread_flush_worker - flush all current works on a kthread_worker
1389b56c0d89STejun Heo * @worker: worker to flush
1390b56c0d89STejun Heo *
1391b56c0d89STejun Heo * Wait until all currently executing or pending works on @worker are
1392b56c0d89STejun Heo * finished.
1393b56c0d89STejun Heo */
13943989144fSPetr Mladek void kthread_flush_worker(struct kthread_worker *worker)
1395b56c0d89STejun Heo {
1396b56c0d89STejun Heo struct kthread_flush_work fwork = {
1397b56c0d89STejun Heo KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1398b56c0d89STejun Heo COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1399b56c0d89STejun Heo };
1400b56c0d89STejun Heo
14013989144fSPetr Mladek kthread_queue_work(worker, &fwork.work);
1402b56c0d89STejun Heo wait_for_completion(&fwork.done);
1403b56c0d89STejun Heo }
14043989144fSPetr Mladek EXPORT_SYMBOL_GPL(kthread_flush_worker);
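
/*
 * Illustrative sketch (not part of the original file): draining a worker
 * before changing shared state that the work callbacks read. my_reconfigure
 * is a hypothetical caller.
 */
#if 0
static void my_reconfigure(struct kthread_worker *my_worker)
{
	kthread_flush_worker(my_worker);	/* all queued works have finished */
	/* Safe to update configuration consumed by the work callbacks. */
}
#endif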
140535033fe9SPetr Mladek
140635033fe9SPetr Mladek /**
140735033fe9SPetr Mladek * kthread_destroy_worker - destroy a kthread worker
140835033fe9SPetr Mladek * @worker: worker to be destroyed
140935033fe9SPetr Mladek *
141035033fe9SPetr Mladek * Flush and destroy @worker. The simple flush is enough because the kthread
141135033fe9SPetr Mladek * worker API is used only in trivial scenarios. There are no multi-step state
141235033fe9SPetr Mladek * machines needed.
1413eb79fa7eSZqiang *
1414eb79fa7eSZqiang * Note that this function does not handle delayed work, so the caller is
1415eb79fa7eSZqiang * responsible for queuing or canceling all delayed work items before
1416eb79fa7eSZqiang * invoking this function.
141735033fe9SPetr Mladek */
141835033fe9SPetr Mladek void kthread_destroy_worker(struct kthread_worker *worker)
141935033fe9SPetr Mladek {
142035033fe9SPetr Mladek struct task_struct *task;
142135033fe9SPetr Mladek
142235033fe9SPetr Mladek task = worker->task;
142335033fe9SPetr Mladek if (WARN_ON(!task))
142435033fe9SPetr Mladek return;
142535033fe9SPetr Mladek
142635033fe9SPetr Mladek kthread_flush_worker(worker);
142735033fe9SPetr Mladek kthread_stop(task);
1428eb79fa7eSZqiang WARN_ON(!list_empty(&worker->delayed_work_list));
142935033fe9SPetr Mladek WARN_ON(!list_empty(&worker->work_list));
143035033fe9SPetr Mladek kfree(worker);
143135033fe9SPetr Mladek }
143235033fe9SPetr Mladek EXPORT_SYMBOL(kthread_destroy_worker);
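
/*
 * Illustrative sketch (not part of the original file): full teardown order.
 * Per the note above, delayed work is canceled by the caller before the
 * worker is destroyed. The my_* names are hypothetical.
 */
#if 0
static void my_teardown(struct kthread_worker *my_worker,
			struct kthread_delayed_work *my_dwork)
{
	kthread_cancel_delayed_work_sync(my_dwork);
	kthread_destroy_worker(my_worker);	/* flushes and stops the kthread */
}
#endif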
143305e3db95SShaohua Li
1434f5678e7fSChristoph Hellwig /**
1435f5678e7fSChristoph Hellwig * kthread_use_mm - make the calling kthread operate on an address space
1436f5678e7fSChristoph Hellwig * @mm: address space to operate on
14379bf5b9ebSChristoph Hellwig */
1438f5678e7fSChristoph Hellwig void kthread_use_mm(struct mm_struct *mm)
14399bf5b9ebSChristoph Hellwig {
14409bf5b9ebSChristoph Hellwig struct mm_struct *active_mm;
14419bf5b9ebSChristoph Hellwig struct task_struct *tsk = current;
14429bf5b9ebSChristoph Hellwig
1443f5678e7fSChristoph Hellwig WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1444f5678e7fSChristoph Hellwig WARN_ON_ONCE(tsk->mm);
1445f5678e7fSChristoph Hellwig
1446aa464ba9SNicholas Piggin /*
1447aa464ba9SNicholas Piggin * It is possible for mm to be the same as tsk->active_mm, but
1448aa464ba9SNicholas Piggin * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1449aa464ba9SNicholas Piggin * because these references are not equivalent.
1450aa464ba9SNicholas Piggin */
14516cad87b0SNicholas Piggin mmgrab(mm);
14526cad87b0SNicholas Piggin
14539bf5b9ebSChristoph Hellwig task_lock(tsk);
145438cf307cSPeter Zijlstra /* Hold off tlb flush IPIs while switching mm's */
145538cf307cSPeter Zijlstra local_irq_disable();
14569bf5b9ebSChristoph Hellwig active_mm = tsk->active_mm;
14579bf5b9ebSChristoph Hellwig tsk->active_mm = mm;
14589bf5b9ebSChristoph Hellwig tsk->mm = mm;
1459618758edSMathieu Desnoyers membarrier_update_current_mm(mm);
146038cf307cSPeter Zijlstra switch_mm_irqs_off(active_mm, mm, tsk);
146138cf307cSPeter Zijlstra local_irq_enable();
14629bf5b9ebSChristoph Hellwig task_unlock(tsk);
14639bf5b9ebSChristoph Hellwig #ifdef finish_arch_post_lock_switch
14649bf5b9ebSChristoph Hellwig finish_arch_post_lock_switch();
14659bf5b9ebSChristoph Hellwig #endif
14669bf5b9ebSChristoph Hellwig
1467618758edSMathieu Desnoyers /*
1468618758edSMathieu Desnoyers * When a kthread starts operating on an address space, the loop
1469618758edSMathieu Desnoyers * in membarrier_{private,global}_expedited() may not observe
1470618758edSMathieu Desnoyers * the update to tsk->mm and thus not issue an IPI. Membarrier requires a
1471618758edSMathieu Desnoyers * memory barrier after storing to tsk->mm, before accessing
1472618758edSMathieu Desnoyers * user-space memory. A full memory barrier for membarrier
1473618758edSMathieu Desnoyers * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1474aa464ba9SNicholas Piggin * mmdrop_lazy_tlb().
1475618758edSMathieu Desnoyers */
1476aa464ba9SNicholas Piggin mmdrop_lazy_tlb(active_mm);
14779bf5b9ebSChristoph Hellwig }
1478f5678e7fSChristoph Hellwig EXPORT_SYMBOL_GPL(kthread_use_mm);
14799bf5b9ebSChristoph Hellwig
1480f5678e7fSChristoph Hellwig /**
1481f5678e7fSChristoph Hellwig * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1482f5678e7fSChristoph Hellwig * @mm: address space to operate on
14839bf5b9ebSChristoph Hellwig */
1484f5678e7fSChristoph Hellwig void kthread_unuse_mm(struct mm_struct *mm)
14859bf5b9ebSChristoph Hellwig {
14869bf5b9ebSChristoph Hellwig struct task_struct *tsk = current;
14879bf5b9ebSChristoph Hellwig
1488f5678e7fSChristoph Hellwig WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1489f5678e7fSChristoph Hellwig WARN_ON_ONCE(!tsk->mm);
1490f5678e7fSChristoph Hellwig
14919bf5b9ebSChristoph Hellwig task_lock(tsk);
1492618758edSMathieu Desnoyers /*
1493618758edSMathieu Desnoyers * When a kthread stops operating on an address space, the loop
1494618758edSMathieu Desnoyers * in membarrier_{private,global}_expedited() may not observe
1495618758edSMathieu Desnoyers * the update to tsk->mm and thus not issue an IPI. Membarrier requires a
1496618758edSMathieu Desnoyers * memory barrier after accessing user-space memory, before
1497618758edSMathieu Desnoyers * clearing tsk->mm.
1498618758edSMathieu Desnoyers */
1499618758edSMathieu Desnoyers smp_mb__after_spinlock();
15009bf5b9ebSChristoph Hellwig sync_mm_rss(mm);
150138cf307cSPeter Zijlstra local_irq_disable();
15029bf5b9ebSChristoph Hellwig tsk->mm = NULL;
1503618758edSMathieu Desnoyers membarrier_update_current_mm(NULL);
1504aa464ba9SNicholas Piggin mmgrab_lazy_tlb(mm);
15059bf5b9ebSChristoph Hellwig /* active_mm is still 'mm' */
15069bf5b9ebSChristoph Hellwig enter_lazy_tlb(mm, tsk);
150738cf307cSPeter Zijlstra local_irq_enable();
15089bf5b9ebSChristoph Hellwig task_unlock(tsk);
1509aa464ba9SNicholas Piggin
1510aa464ba9SNicholas Piggin mmdrop(mm);
15119bf5b9ebSChristoph Hellwig }
1512f5678e7fSChristoph Hellwig EXPORT_SYMBOL_GPL(kthread_unuse_mm);
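
/*
 * Illustrative sketch (not part of the original file): a kernel thread
 * temporarily adopting a user address space so that user-space memory of
 * that mm can be accessed. It assumes the creator took a reference on the mm
 * with mmget() before handing it over; my_kthread_fn is hypothetical.
 */
#if 0
static int my_kthread_fn(void *data)
{
	struct mm_struct *mm = data;	/* reference held via mmget() */

	kthread_use_mm(mm);
	/* copy_to_user()/copy_from_user() against @mm may be used here. */
	kthread_unuse_mm(mm);

	mmput(mm);
	return 0;
}
#endif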
15139bf5b9ebSChristoph Hellwig
15140b508bc9SShaohua Li #ifdef CONFIG_BLK_CGROUP
151505e3db95SShaohua Li /**
151605e3db95SShaohua Li * kthread_associate_blkcg - associate blkcg to current kthread
151705e3db95SShaohua Li * @css: the cgroup info
151805e3db95SShaohua Li *
151905e3db95SShaohua Li * Current thread must be a kthread. The thread is running jobs on behalf of
152005e3db95SShaohua Li * other threads. In some cases, we expect the jobs attach cgroup info of
152105e3db95SShaohua Li * original threads instead of that of current thread. This function stores
152205e3db95SShaohua Li * original thread's cgroup info in current kthread context for later
152305e3db95SShaohua Li * retrieval.
152405e3db95SShaohua Li */
152505e3db95SShaohua Li void kthread_associate_blkcg(struct cgroup_subsys_state *css)
152605e3db95SShaohua Li {
152705e3db95SShaohua Li struct kthread *kthread;
152805e3db95SShaohua Li
152905e3db95SShaohua Li if (!(current->flags & PF_KTHREAD))
153005e3db95SShaohua Li return;
153105e3db95SShaohua Li kthread = to_kthread(current);
153205e3db95SShaohua Li if (!kthread)
153305e3db95SShaohua Li return;
153405e3db95SShaohua Li
153505e3db95SShaohua Li if (kthread->blkcg_css) {
153605e3db95SShaohua Li css_put(kthread->blkcg_css);
153705e3db95SShaohua Li kthread->blkcg_css = NULL;
153805e3db95SShaohua Li }
153905e3db95SShaohua Li if (css) {
154005e3db95SShaohua Li css_get(css);
154105e3db95SShaohua Li kthread->blkcg_css = css;
154205e3db95SShaohua Li }
154305e3db95SShaohua Li }
154405e3db95SShaohua Li EXPORT_SYMBOL(kthread_associate_blkcg);
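
/*
 * Illustrative sketch (not part of the original file): a kthread doing I/O
 * on behalf of another task charges that task's blkcg by associating its css
 * around the submission. my_submit and orig_css are hypothetical; the loop
 * driver uses this interface in a similar way.
 */
#if 0
static void my_submit(struct bio *bio, struct cgroup_subsys_state *orig_css)
{
	kthread_associate_blkcg(orig_css);
	submit_bio(bio);
	kthread_associate_blkcg(NULL);	/* drop the association again */
}
#endif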
154505e3db95SShaohua Li
154605e3db95SShaohua Li /**
154705e3db95SShaohua Li * kthread_blkcg - get associated blkcg css of current kthread
154805e3db95SShaohua Li *
154905e3db95SShaohua Li * Current thread must be a kthread.
155005e3db95SShaohua Li */
155105e3db95SShaohua Li struct cgroup_subsys_state *kthread_blkcg(void)
155205e3db95SShaohua Li {
155305e3db95SShaohua Li struct kthread *kthread;
155405e3db95SShaohua Li
155505e3db95SShaohua Li if (current->flags & PF_KTHREAD) {
155605e3db95SShaohua Li kthread = to_kthread(current);
155705e3db95SShaohua Li if (kthread)
155805e3db95SShaohua Li return kthread->blkcg_css;
155905e3db95SShaohua Li }
156005e3db95SShaohua Li return NULL;
156105e3db95SShaohua Li }
156205e3db95SShaohua Li #endif
1563