xref: /openbmc/linux/kernel/bpf/task_iter.c (revision 89b15863)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
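/* BPF iterators for tasks ("task") and per-task open files ("task_file").
 * Both walk the pid namespace captured when the iterator's seq_file is set
 * up and feed each element to an attached BPF program through a seq_file.
 */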

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>

struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};

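/* Return the first task in @ns whose tid is >= *tid, taking a reference on
 * it and updating *tid to the tid actually found.  With @skip_if_dup_files,
 * threads that share their group leader's files_struct are skipped so the
 * task_file iterator does not visit the same file table twice.  Returns NULL
 * when the namespace has no further tasks.
 */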
static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, ns);
	if (pid) {
		*tid = pid_nr_ns(pid, ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && task->tgid != task->pid &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}

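/* seq_file callbacks for the "task" iterator.  info->tid records the next
 * tid to resume from, so iteration continues correctly across successive
 * read() calls on the seq_file.
 */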
static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(info->common.ns, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(info->common.ns, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}

struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

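/* struct bpf_iter__task above is the context an attached BPF program sees.
 * As an illustrative sketch (not part of this file), a program for this
 * target would look roughly like the following, assuming the usual libbpf
 * helpers (SEC(), BPF_SEQ_PRINTF()) and a vmlinux.h dump of the types:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		// task is NULL on the final (stop) invocation
 *		if (!task)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */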
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	meta.seq = seq;
	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

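/* ->stop() is called with v == NULL once iteration is complete; in that case
 * the BPF program is run one last time (in_stop == true, task == NULL) so it
 * can emit any trailing output.  Otherwise stop just drops the reference that
 * ->start()/->next() took on the current task.
 */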
static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}

static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

static const struct seq_operations task_seq_ops = {
	.start	= task_seq_start,
	.next	= task_seq_next,
	.stop	= task_seq_stop,
	.show	= task_seq_show,
};

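/* Private state for the "task_file" iterator: the resume position (tid, fd)
 * plus the task and files_struct that are currently pinned, if any.
 */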
struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct files_struct *files;
	u32 tid;
	u32 fd;
};

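/* Advance to the next open file: scan the current task's fd table under RCU
 * starting at info->fd and, when it is exhausted, move on to the next task
 * in the namespace.
 */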
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	struct pid_namespace *ns = info->common.ns;
	u32 curr_tid = info->tid, max_fds;
	struct files_struct *curr_files;
	struct task_struct *curr_task;
	int curr_fd = info->fd;

	/* If this function returns a non-NULL file object,
	 * it holds references to the task, files_struct and file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_files = info->files;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(ns, &curr_tid, true);
		if (!curr_task) {
			info->task = NULL;
			info->files = NULL;
			return NULL;
		}

		curr_files = get_files_struct(curr_task);
		if (!curr_files) {
			put_task_struct(curr_task);
			curr_tid = ++(info->tid);
			info->fd = 0;
			goto again;
		}

		info->files = curr_files;
		info->task = curr_task;
		if (curr_tid == info->tid) {
			curr_fd = info->fd;
		} else {
			info->tid = curr_tid;
			curr_fd = 0;
		}
	}

	rcu_read_lock();
	max_fds = files_fdtable(curr_files)->max_fds;
	for (; curr_fd < max_fds; curr_fd++) {
		struct file *f;

		f = fcheck_files(curr_files, curr_fd);
		if (!f)
			continue;
		if (!get_file_rcu(f))
			continue;

		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_files_struct(curr_files);
	put_task_struct(curr_task);
	info->task = NULL;
	info->files = NULL;
	info->fd = 0;
	curr_tid = ++(info->tid);
	goto again;
}

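/* seq_file callbacks for the "task_file" iterator.  start() begins with no
 * task/files pinned; next() releases the file just shown and advances the fd
 * cursor before searching for the next one.
 */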
static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	info->files = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}

struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

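/* As with the task target, an attached program receives the context above.
 * A rough sketch, again assuming the usual libbpf helpers and not part of
 * this file:
 *
 *	SEC("iter/task_file")
 *	int dump_task_file(struct bpf_iter__task_file *ctx)
 *	{
 *		struct file *file = ctx->file;
 *
 *		if (!file)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%8d %8d %lx\n",
 *			       ctx->task->pid, ctx->fd, (long)file->f_op);
 *		return 0;
 *	}
 */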
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}

static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_files_struct(info->files);
		put_task_struct(info->task);
		info->files = NULL;
		info->task = NULL;
	}
}

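/* Shared init/fini for both iterators: pin the pid namespace of the task
 * that sets up the iterator's seq_file for the lifetime of that instance,
 * and drop the reference when it is torn down.
 */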
static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start	= task_file_seq_start,
	.next	= task_file_seq_next,
	.stop	= task_file_seq_stop,
	.show	= task_file_seq_show,
};

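/* BTF type IDs for task_struct and file, resolved at build time and used
 * below to give the ctx pointers their BTF types.
 */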
BTF_ID_LIST(btf_task_file_ids)
BTF_ID(struct, task_struct)
BTF_ID(struct, file)

static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops		= &task_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_info),
};

static struct bpf_iter_reg task_reg_info = {
	.target			= "task",
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_seq_info,
};

static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops		= &task_file_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_file_info),
};

static struct bpf_iter_reg task_file_reg_info = {
	.target			= "task_file",
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_file_seq_info,
};

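/* Register both iterator targets.  The BTF IDs resolved above are filled in
 * here so the verifier knows the exact types behind the ctx pointers, which
 * may be NULL (PTR_TO_BTF_ID_OR_NULL) on the final invocation.
 */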
static int __init task_iter_init(void)
{
	int ret;

	task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
	return bpf_iter_reg_target(&task_file_reg_info);
}
late_initcall(task_iter_init);