/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
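
/*
 * cgroup_bpf_enabled in <linux/bpf-cgroup.h> wraps static_branch_unlikely()
 * on this key, so hook sites elsewhere in the kernel pay only a patched
 * no-op while no cgroup-bpf program is attached anywhere. An illustrative
 * sketch of such a call site (not code from this file):
 *
 *	if (cgroup_bpf_enabled)
 *		ret = __cgroup_bpf_run_filter_sk(sk, BPF_CGROUP_INET_SOCK_CREATE);
 */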

/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* Count the number of elements in the list.
 * It's slow, but the list cannot be long.
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* If the parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * If the parent has an overridable or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
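
/*
 * Illustrative walk through the rule above, assuming cgroup cg_b is a
 * child of cg_a (pseudo-calls, not code from this file):
 *
 *	attach(cg_a, progA, 0);			// non-overridable
 *	attach(cg_b, progB, <any flags>);	// -EPERM, parent forbids it
 *
 *	attach(cg_a, progA, BPF_F_ALLOW_OVERRIDE);
 *	attach(cg_b, progB, BPF_F_ALLOW_OVERRIDE); // ok, progB overrides progA
 *
 *	attach(cg_a, progA, BPF_F_ALLOW_MULTI);
 *	attach(cg_b, progB, BPF_F_ALLOW_MULTI);	// ok, both stay effective
 */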

/* Compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	rcu_assign_pointer(*array, progs);
	return 0;
}
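
/*
 * Illustrative result, assuming cg_a (BPF_F_ALLOW_MULTI progs A1, A2) is
 * the parent of cg_b (prog B1): cg_b's effective array is populated as
 * [B1, A1, A2], i.e. this cgroup's own programs come first, followed by
 * each ancestor's in attachment order, and BPF_PROG_RUN_ARRAY() then
 * walks the array front to back.
 */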

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might still be walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* Has to use a macro instead of a const int, since the compiler thinks
 * that the array below is variable length.
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none.
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			for_each_cgroup_storage_type(stype) {
				old_storage[stype] = pl->storage[stype];
				bpf_cgroup_storage_unlink(old_storage[stype]);
			}
			pl_was_allocated = false;
		}
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
	}

	cgrp->bpf.flags[type] = flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
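
/*
 * Illustrative userspace sketch of reaching this path (assumes a cgroup2
 * mount at /sys/fs/cgroup and an already-loaded prog_fd; not code from
 * this file):
 *
 *	union bpf_attr attr = {};
 *	int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY);
 *
 *	attr.target_fd	   = cg_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */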

/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach a MULTI prog the user has to specify a
			 * valid FD of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now we can actually delete it from this cgroup's list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore old_prog */
	pl->prog = old_prog;
	return err;
}
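
/*
 * Illustrative userspace sketch (same assumptions as the attach example
 * above): detaching goes through BPF_PROG_DETACH, and for ALLOW_MULTI
 * cgroups attach_bpf_fd must identify the program to remove:
 *
 *	attr.target_fd	   = cg_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */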

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
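
/*
 * Illustrative userspace sketch (assumes cg_fd as above): querying the
 * effective program ids for one attach type:
 *
 *	__u32 ids[64];
 *
 *	attr.query.target_fd   = cg_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_cnt    = 64;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */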

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 __bpf_prog_run_save_cb);
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
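
/*
 * Illustrative BPF side (a sketch, not code from this file): a minimal
 * BPF_PROG_TYPE_CGROUP_SKB program run by the array above; returning 1
 * lets the packet pass, returning 0 makes this function report -EPERM:
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;
 *	}
 */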

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.). May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
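
/*
 * Illustrative BPF side (a sketch): a BPF_PROG_TYPE_CGROUP_DEVICE program
 * sees the ctx packed above, with the access bits in the upper 16 bits of
 * access_type and the device type in the lower 16:
 *
 *	SEC("cgroup/dev")
 *	int deny_mknod(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		if ((ctx->access_type >> 16) & BPF_DEVCG_ACC_MKNOD)
 *			return 0;	// deny
 *		return 1;		// allow
 *	}
 */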

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
	};
	struct cgroup *cgrp;
	int ret;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}
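
/*
 * Example: for /proc/sys/net/ipv4/tcp_mem the recursion above copies
 * "net/" and then "ipv4/" into the buffer; the caller appends the
 * table's own procname ("tcp_mem") to complete the path.
 */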

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
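
/*
 * Illustrative BPF side (a sketch): a BPF_PROG_TYPE_CGROUP_SYSCTL program
 * using the helper above to fetch the full name, e.g. "net/ipv4/tcp_mem":
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_allow(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
 *		return 1;	// allow the access
 *	}
 */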

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct bpf_sysctl, write):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       FIELD_SIZEOF(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};