/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>
#include <linux/uaccess.h>

struct task_struct;
struct rusage;
union thread_union;
struct css_set;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL

struct kernel_clone_args {
        u64 flags;
        int __user *pidfd;
        int __user *child_tid;
        int __user *parent_tid;
        const char *name;
        int exit_signal;
        u32 kthread:1;
        u32 io_thread:1;
        u32 user_worker:1;
        u32 no_files:1;
        unsigned long stack;
        unsigned long stack_size;
        unsigned long tls;
        pid_t *set_tid;
        /* Number of elements in *set_tid */
        size_t set_tid_size;
        int cgroup;
        int idle;
        int (*fn)(void *);
        void *fn_arg;
        struct cgroup *cgrp;
        struct css_set *cset;
        unsigned int kill_seq;
};
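
/*
 * Illustrative sketch only, not part of this header: roughly how an
 * in-kernel caller such as kernel_thread() might populate the structure
 * above before handing it to kernel_clone(). The exact field values are
 * an assumption for illustration, not the in-tree implementation:
 *
 *        struct kernel_clone_args args = {
 *                .flags          = ((lower_32_bits(flags) | CLONE_VM |
 *                                    CLONE_UNTRACED) & ~CSIGNAL),
 *                .exit_signal    = (lower_32_bits(flags) & CSIGNAL),
 *                .fn             = fn,
 *                .fn_arg         = arg,
 *                .name           = name,
 *                .kthread        = 1,
 *        };
 *
 *        return kernel_clone(&args);
 */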

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern int lockdep_tasklist_lock_is_held(void);

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);

extern void mm_cache_init(void);
extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct *p);

extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);

extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern __noreturn void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *copy_process(struct pid *pid, int trace, int node,
                                 struct kernel_clone_args *args);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
                           unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
        refcount_inc(&t->usage);
        return t;
}

extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);

static inline void put_task_struct(struct task_struct *t)
{
        if (!refcount_dec_and_test(&t->usage))
                return;

        /*
         * In !RT, it is always safe to call __put_task_struct().
         * Under RT, we can only call it in preemptible context.
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
                static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);

                lock_map_acquire_try(&put_task_map);
                __put_task_struct(t);
                lock_map_release(&put_task_map);
                return;
        }

        /*
         * Under PREEMPT_RT, we can't call __put_task_struct()
         * in atomic context because it will indirectly
         * acquire sleeping locks.
         *
         * call_rcu() will schedule __put_task_struct_rcu_cb()
         * to be called in process context.
         *
         * __put_task_struct() is called when
         * refcount_dec_and_test(&t->usage) succeeds.
         *
         * This means that it can't "conflict" with
         * put_task_struct_rcu_user() which abuses ->rcu the same
         * way; rcu_users has a reference so task->usage can't be
         * zero after rcu_users 1 -> 0 transition.
         *
         * delayed_free_task() also uses ->rcu, but it is only called
         * when it fails to fork a process. Therefore, there is no
         * way it can conflict with put_task_struct().
         */
        call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
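
/*
 * Illustrative sketch only, not part of this header: a common pattern is
 * to pin a task looked up under RCU with get_task_struct() and drop the
 * reference later with put_task_struct(), which may defer the actual
 * free via call_rcu() under PREEMPT_RT as described above. use_task()
 * is a made-up placeholder for the caller's own work:
 *
 *        rcu_read_lock();
 *        tsk = find_task_by_vpid(pid);
 *        if (tsk)
 *                get_task_struct(tsk);
 *        rcu_read_unlock();
 *
 *        if (tsk) {
 *                use_task(tsk);
 *                put_task_struct(tsk);
 *        }
 */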

DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
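
/*
 * Illustrative sketch only: the put_task cleanup class defined above can
 * be used with the __free() attribute from <linux/cleanup.h> so that the
 * reference is dropped automatically when the pointer leaves scope. The
 * names some_task and do_work() are made up:
 *
 *        struct task_struct *p __free(put_task) = NULL;
 *
 *        p = get_task_struct(some_task);
 *        if (!do_work(p))
 *                return -EINVAL;
 *
 * put_task_struct(p) then runs automatically when p goes out of scope.
 */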

static inline void put_task_struct_many(struct task_struct *t, int nr)
{
        if (refcount_sub_and_test(nr, &t->usage))
                __put_task_struct(t);
}

void put_task_struct_rcu_user(struct task_struct *task);

/* Free all architecture-specific resources held by a thread. */
void release_thread(struct task_struct *dead_task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        *offset = 0;
        /* Handle dynamically sized thread_struct. */
        *size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
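
/*
 * Illustrative sketch only: the (offset, size) pair reported above marks
 * the part of task_struct that hardened usercopy may treat as copyable
 * to/from userspace. Cache setup along the lines of fork_init() might
 * look roughly like this (an assumption, not the exact in-tree code;
 * useroffset, usersize, align and cachep are local placeholders):
 *
 *        unsigned long useroffset, usersize;
 *
 *        arch_thread_struct_whitelist(&useroffset, &usersize);
 *        if (usersize)
 *                useroffset += offsetof(struct task_struct, thread);
 *        cachep = kmem_cache_create_usercopy("task_struct",
 *                        arch_task_struct_size, align,
 *                        SLAB_PANIC | SLAB_ACCOUNT,
 *                        useroffset, usersize, NULL);
 */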

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
        return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
        return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}
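
/*
 * Illustrative sketch only: task_lock() is typically held around short
 * sections that read or update the fields listed above, e.g. taking a
 * stable snapshot of another task's ->comm (the buffer name is made up):
 *
 *        char buf[TASK_COMM_LEN];
 *
 *        task_lock(p);
 *        strscpy(buf, p->comm, sizeof(buf));
 *        task_unlock(p);
 */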

#endif /* _LINUX_SCHED_TASK_H */