xref: /openbmc/linux/kernel/bpf/cgroup.c (revision 394e40a2)
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			bpf_cgroup_storage_unlink(pl->storage);
			bpf_cgroup_storage_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* Count the number of elements in the list.
 * It's slow, but the list cannot be long.
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* If the parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * If the parent has overridable or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
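
/* A sketch of the rule above (paths are illustrative):
 *
 *	/foo has a prog attached with no flags (non-overridable):
 *		attach to /foo/bar	-> -EPERM, parent owns the hierarchy
 *	/foo has a prog attached with BPF_F_ALLOW_OVERRIDE:
 *		attach to /foo/bar	-> ok, /foo's prog yields to bar's
 *	/foo has progs attached with BPF_F_ALLOW_MULTI:
 *		attach to /foo/bar	-> ok, both cgroups' progs will run
 */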

/* Compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's BPF_F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			progs->items[cnt].cgroup_storage = pl->storage;
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	rcu_assign_pointer(*array, progs);
	return 0;
}
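
/* For example (a sketch, assuming every level was attached with
 * BPF_F_ALLOW_MULTI):
 *
 *	/foo		progs: A
 *	/foo/bar	progs: B, C
 *
 * the effective array for /foo/bar becomes [B, C, A]: the cgroup's own
 * programs first, then its ancestors' walking towards the root.
 */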

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might still be walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* Has to use a macro instead of a const int, since the compiler thinks
 * that the array below is variable length.
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Attach flags (BPF_F_ALLOW_OVERRIDE, BPF_F_ALLOW_MULTI or none)
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage, *old_storage = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching a non-overridable program on top
		 * of an existing overridable one in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 * is attached.
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	storage = bpf_cgroup_storage_alloc(prog);
	if (IS_ERR(storage))
		return -ENOMEM;

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				bpf_cgroup_storage_free(storage);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storage_free(storage);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		pl->storage = storage;
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				bpf_cgroup_storage_free(storage);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			old_storage = pl->storage;
			bpf_cgroup_storage_unlink(old_storage);
			pl_was_allocated = false;
		}
		pl->prog = prog;
		pl->storage = storage;
	}

	cgrp->bpf.flags[type] = flags;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	static_branch_inc(&cgroup_bpf_enabled_key);
	if (old_storage)
		bpf_cgroup_storage_free(old_storage);
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	bpf_cgroup_storage_link(storage, cgrp, type);
	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and cleanup the prog list */
	pl->prog = old_prog;
	bpf_cgroup_storage_free(pl->storage);
	pl->storage = old_storage;
	bpf_cgroup_storage_link(old_storage, cgrp, type);
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
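
/* Userspace reaches __cgroup_bpf_attach() through the bpf(2) syscall,
 * roughly as follows (a sketch; cgroup_fd and prog_fd are assumed to be
 * valid file descriptors):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * cgroup_bpf_prog_attach() below resolves target_fd to the cgroup and
 * calls here with cgroup_mutex held.
 */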

/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 * @unused_flags: unused
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 unused_flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach a MULTI prog the user has to specify a
			 * valid FD of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog == NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* now we can actually delete it from this cgroup's list */
	list_del(&pl->node);
	bpf_cgroup_storage_unlink(pl->storage);
	bpf_cgroup_storage_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and restore the old_prog */
	pl->prog = old_prog;
	return err;
}
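
/* Detaching mirrors attaching (a sketch; with BPF_F_ALLOW_MULTI the
 * attach_bpf_fd of the program to remove is mandatory, otherwise it
 * may be omitted, per the backward-compatibility rule above):
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	err = syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */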

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
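
/* Query sketch (buffer size is illustrative): prog_cnt is always set to
 * the total count first; -ENOSPC signals the supplied buffer was smaller:
 *
 *	__u32 ids[64];
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */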

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 bpf_prog_run_save_cb);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
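
/* Typical call site (a sketch; cf. the BPF_CGROUP_RUN_PROG_INET_INGRESS()
 * wrapper in include/linux/bpf-cgroup.h, which also checks the static key):
 *
 *	if (cgroup_bpf_enabled)
 *		err = __cgroup_bpf_run_filter_skb(sk, skb,
 *						  BPF_CGROUP_INET_INGRESS);
 */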

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
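
/* Typical call site (sketch): socket creation, via the
 * BPF_CGROUP_RUN_PROG_INET_SOCK() wrapper in include/linux/bpf-cgroup.h:
 *
 *	err = __cgroup_bpf_run_filter_sk(sk, BPF_CGROUP_INET_SOCK_CREATE);
 */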

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and a
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use the sockaddr
 * @uaddr: sockaddr struct provided by user space
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check the socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
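
/* Typical call site (sketch): hooking bind(2) on an IPv4 socket, as the
 * BPF_CGROUP_RUN_PROG_INET4_BIND() wrapper does; bind carries no extra
 * context, so t_ctx is NULL:
 *
 *	err = __cgroup_bpf_run_filter_sock_addr(sk, uaddr,
 *						BPF_CGROUP_INET4_BIND, NULL);
 */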

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * the sk with connection information (IP addresses, etc.). May not
 * contain cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
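
/* Typical call site (sketch): TCP event callbacks go through the
 * BPF_CGROUP_RUN_PROG_SOCK_OPS() wrapper with a filled bpf_sock_ops_kern:
 *
 *	sock_ops.op = BPF_SOCK_OPS_TCP_CONNECT_CB;
 *	sock_ops.sk = sk;
 *	err = __cgroup_bpf_run_filter_sock_ops(sk, &sock_ops,
 *					       BPF_CGROUP_SOCK_OPS);
 */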

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
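
/* Example (sketch): a read of char device 1:3 (/dev/null) is checked as
 *
 *	__cgroup_bpf_check_dev_permission(BPF_DEVCG_DEV_CHAR, 1, 3,
 *					  BPF_DEVCG_ACC_READ,
 *					  BPF_CGROUP_DEVICE);
 *
 * which returns 0 when access is allowed and 1 when it is denied.
 */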

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through: trace_printk is CAP_SYS_ADMIN only */
	default:
		return NULL;
	}
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};
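
/* A minimal BPF_PROG_TYPE_CGROUP_DEVICE program consuming the context
 * validated above (a sketch in restricted C, built with clang -target bpf;
 * SEC() is the usual libbpf section macro, cf. samples/bpf/):
 *
 *	SEC("cgroup/dev")
 *	int bpf_prog(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xFFFF;
 *
 *		// allow only char device 1:3 (/dev/null)
 *		if (type == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 3)
 *			return 1;
 *		return 0;
 *	}
 */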