// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_lsm.h>
#include <linux/bpf_verifier.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/* __always_inline is necessary to prevent indirect call through run_prog
 * function pointer.
 */
static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
		      enum cgroup_bpf_attach_type atype,
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(cgrp->effective[atype]);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		if (ret_flags) {
			*(ret_flags) |= (func_ret >> 1);
			func_ret &= 1;
		}
		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}
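
/* Illustration (not part of the kernel sources): how the helper above
 * composes return values. Assume two attached programs, the first returning
 * 3 (binary 11) and the second returning 0, with retval == 0 and
 * *ret_flags == 0 on entry:
 *
 *	prog 1: func_ret = 3 -> *ret_flags |= 1, func_ret becomes 1
 *	prog 2: func_ret = 0 -> run_ctx.retval = -EPERM
 *
 * The caller then sees -EPERM with the flag bit (BPF_RET_SET_CN for egress)
 * set. A program may also pick a custom errno via bpf_set_retval(); since
 * that already makes run_ctx.retval an error value, the -EPERM overwrite
 * is skipped.
 */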

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct sock *sk;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sk = (void *)(unsigned long)args[0];
	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct socket *sock;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sock = (void *)(unsigned long)args[0];
	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct cgroup *cgrp;
	int ret = 0;

	/*shim_prog = container_of(insn, struct bpf_prog, insnsi);*/
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	/* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
	cgrp = task_dfl_cgroup(current);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

#ifdef CONFIG_BPF_LSM
struct cgroup_lsm_atype {
	u32 attach_btf_id;
	int refcnt;
};

static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];

static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	int i;

	lockdep_assert_held(&cgroup_mutex);

	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
			return CGROUP_LSM_START + i;

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == 0)
			return CGROUP_LSM_START + i;

	return -E2BIG;
}

void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	lockdep_assert_held(&cgroup_mutex);

	WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
		     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);

	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
	cgroup_lsm_atype[i].refcnt++;
}

void bpf_cgroup_atype_put(int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	mutex_lock(&cgroup_mutex);
	if (--cgroup_lsm_atype[i].refcnt <= 0)
		cgroup_lsm_atype[i].attach_btf_id = 0;
	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
	mutex_unlock(&cgroup_mutex);
}
#else
static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_LSM */

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct hlist_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl;
		struct hlist_node *pltmp;

		hlist_for_each_entry_safe(pl, pltmp, progs, node) {
			hlist_del(&pl->node);
			if (pl->prog) {
				if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->prog);
				bpf_prog_put(pl->prog);
			}
			if (pl->link) {
				if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
				bpf_cgroup_link_auto_detach(pl->link);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct hlist_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	hlist_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* If the parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * If the parent has overridable or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
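
/* Worked example (illustrative, derived from the walk above): for a
 * hierarchy /A/B where A has multi-progs a1 and a2 (in attach order) and
 * B has multi-prog b1, B's effective array comes out as [b1, a1, a2].
 * Since bpf_prog_run_array_cg() starts at items[0], the programs of the
 * cgroup itself run before the ones inherited from its ancestors.
 */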

static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (hlist_empty(progs))
			return NULL;
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	hlist_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		hlist_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog *new_prog = prog ? : link->link.prog;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		struct hlist_node *last = NULL;

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		if (hlist_empty(progs))
			hlist_add_head(&pl->node, progs);
		else
			hlist_for_each(last, progs) {
				if (last->next)
					continue;
				hlist_add_behind(&pl->node, last);
				break;
			}
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	if (type == BPF_LSM_CGROUP) {
		err = bpf_trampoline_link_cgroup_shim(new_prog, atype);
		if (err)
			goto cleanup;
	}

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup_trampoline;

	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	} else {
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	}
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup_trampoline:
	if (type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(new_prog);

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		hlist_del(&pl->node);
		kfree(pl);
	}
	return err;
}
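
/* Example (illustrative userspace sketch, assuming libbpf): the flag
 * combinations validated above are the ones BPF_PROG_ATTACH accepts, e.g.
 * a plain multi-attach with hypothetical prog_fd/cgroup_fd descriptors:
 *
 *	err = bpf_prog_attach(prog_fd, cgroup_fd,
 *			      BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 *
 * Adding BPF_F_REPLACE (plus a replace_bpf_fd in the attr) swaps one
 * program of the multi-set in place instead of appending.
 */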

static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: A new program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	bool found = false;

	atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	hlist_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (hlist_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	hlist_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}

		/* no link or prog match, skip the cgroup of this layer */
		continue;
found:
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	u32 attach_btf_id = 0;
	u32 flags;

	if (prog)
		attach_btf_id = prog->aux->attach_btf_id;
	if (link)
		attach_btf_id = link->link.prog->aux->attach_btf_id;

	atype = bpf_cgroup_atype_find(type, attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if updating the effective array failed, replace the prog with a dummy prog */
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	hlist_del(&pl->node);

	kfree(pl);
	if (hlist_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	}
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}

static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type from_atype, to_atype;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;
	int total_cnt = 0;
	u32 flags;

	if (type == BPF_LSM_CGROUP) {
		if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
			return -EINVAL;

		from_atype = CGROUP_LSM_START;
		to_atype = CGROUP_LSM_END;
		flags = 0;
	} else {
		from_atype = to_cgroup_bpf_attach_type(type);
		if (from_atype < 0)
			return -EINVAL;
		to_atype = from_atype;
		flags = cgrp->bpf.flags[from_atype];
	}

	for (atype = from_atype; atype <= to_atype; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			total_cnt += bpf_prog_array_length(effective);
		} else {
			total_cnt += prog_list_length(&cgrp->bpf.progs[atype]);
		}
	}

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
		/* return early if user requested only program count + flags */
		return 0;

	if (attr->query.prog_cnt < total_cnt) {
		total_cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
		} else {
			struct hlist_head *progs;
			struct bpf_prog_list *pl;
			struct bpf_prog *prog;
			u32 id;

			progs = &cgrp->bpf.progs[atype];
			cnt = min_t(int, prog_list_length(progs), total_cnt);
			i = 0;
			hlist_for_each_entry(pl, progs, node) {
				prog = prog_list_prog(pl);
				id = prog->aux->id;
				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
					return -EFAULT;
				if (++i == cnt)
					break;
			}
		}

		if (prog_attach_flags) {
			flags = cgrp->bpf.flags[atype];

			for (i = 0; i < cnt; i++)
				if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
					return -EFAULT;
			prog_attach_flags += cnt;
		}

		prog_ids += cnt;
		total_cnt -= cnt;
	}
	return ret;
}

static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));
	if (cg_link->type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}
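
/* Example (illustrative userspace sketch, assuming libbpf): the link-based
 * counterpart of prog attach; note that links are implicitly multi-attach
 * (BPF_F_ALLOW_MULTI is passed unconditionally above):
 *
 *	int link_fd = bpf_link_create(prog_fd, cgroup_fd,
 *				      BPF_CGROUP_INET_INGRESS, NULL);
 *
 * The attachment then lasts until the last FD for the link is closed (or
 * the cgroup dies), rather than until an explicit BPF_PROG_DETACH.
 */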

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
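
/* Example (illustrative userspace sketch, assuming libbpf): querying the
 * effective program IDs for a cgroup, i.e. including inherited ones:
 *
 *	__u32 ids[64], cnt = 64, attach_flags = 0;
 *
 *	err = bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS,
 *			     BPF_F_QUERY_EFFECTIVE, &attach_flags,
 *			     ids, &cnt);
 *
 * On success cnt holds the number of IDs copied; per the -ENOSPC path
 * above, a too-small array is filled and the call fails with ENOSPC.
 */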

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -err			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		u32 flags = 0;
		bool cn;

		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
					    __bpf_prog_run_save_cb, 0, &flags);

		/* Return values of CGROUP EGRESS BPF programs are:
		 *   0: drop packet
		 *   1: keep packet
		 *   2: drop packet and cn
		 *   3: keep packet and cn
		 *
		 * The returned value is then converted to one of the NET_XMIT
		 * or an error code that is then interpreted as drop packet
		 * (and no cn):
		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
		 *   1: NET_XMIT_DROP     skb should be dropped and cn
		 *   2: NET_XMIT_CN       skb should be transmitted and cn
		 *   3: -err              skb should be dropped
		 */

		cn = flags & BPF_RET_SET_CN;
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
		if (!ret)
			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
		else
			ret = (cn ? NET_XMIT_DROP : ret);
	} else {
		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
					    skb, __bpf_prog_run_save_cb, 0,
					    NULL);
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
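
/* Example (illustrative BPF program, not part of this file): an egress
 * filter exercising the return-value convention documented above, built
 * against libbpf's bpf_helpers.h; all names are hypothetical:
 *
 *	SEC("cgroup_skb/egress")
 *	int egress_small_only(struct __sk_buff *skb)
 *	{
 *		if (skb->len > 1400)
 *			return 2;	// drop packet and signal cn
 *		return 1;		// keep packet
 *	}
 *	char _license[] SEC("license") = "GPL";
 */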

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
				     NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
				     0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.). May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
				     0, NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
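
/* Example (illustrative BPF program, not part of this file): a sock_ops
 * program dispatched through the helper above; here it suggests an initial
 * receive window on BPF_SOCK_OPS_RWND_INIT via the reply field:
 *
 *	SEC("sockops")
 *	int set_initial_rwnd(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_RWND_INIT)
 *			skops->reply = 40;	// hypothetical window value
 *		return 1;
 *	}
 */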

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int ret;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	return ret;
}

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now,
	 * but provides the ability to extend the API.
	 * The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
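
/* Example (illustrative BPF program, not part of this file): per-cgroup
 * byte counting with the helper above; map and section names follow the
 * usual libbpf conventions, everything else is hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, struct bpf_cgroup_storage_key);
 *		__type(value, __u64);
 *	} byte_cnt SEC(".maps");
 *
 *	SEC("cgroup_skb/egress")
 *	int count_bytes(struct __sk_buff *skb)
 *	{
 *		__u64 *bytes = bpf_get_local_storage(&byte_cnt, 0);
 *
 *		__sync_fetch_and_add(bytes, skb->len);
 *		return 1;
 *	}
 */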

BPF_CALL_0(bpf_get_retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	return ctx->retval;
}

const struct bpf_func_proto bpf_get_retval_proto = {
	.func		= bpf_get_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_set_retval, int, retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	ctx->retval = retval;
	return 0;
}

const struct bpf_func_proto bpf_set_retval_proto = {
	.func		= bpf_set_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
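
/* Example (illustrative BPF program, not part of this file): rejecting a
 * sockopt with a custom errno instead of the default -EPERM, using the
 * helper above:
 *
 *	SEC("cgroup/setsockopt")
 *	int no_ip_tos(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
 *			bpf_set_retval(-EACCES);
 *			return 0;	// userspace sees -EACCES
 *		}
 *		return 1;
 *	}
 */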

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	func_proto = cgroup_common_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	func_proto = cgroup_current_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret;
}
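
/* Example (illustrative BPF program, not part of this file): a sysctl
 * policy run by the function above, permitting reads but denying writes
 * for tasks in the cgroup:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_ro(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// 0 -> writer gets -EPERM
 *	}
 */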

#ifdef CONFIG_NET
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
				    &ctx, bpf_prog_run, 0, NULL);
	release_sock(sk);

	if (ret)
		goto out;

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
1862 
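/* Run all CGROUP_GETSOCKOPT programs attached to the socket's cgroup
 * after the kernel getsockopt handler. When the kernel handler succeeded
 * (retval == 0), its result is first copied into a temporary buffer so
 * the programs can inspect and rewrite it; whatever optval/optlen the
 * programs leave behind is then copied back to userspace. Programs may
 * also override retval itself (via bpf_set_retval() or a write to
 * ctx->retval, see cg_sockopt_convert_ctx_access() below).
 */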
1863 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1864 				       int optname, char __user *optval,
1865 				       int __user *optlen, int max_optlen,
1866 				       int retval)
1867 {
1868 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1869 	struct bpf_sockopt_buf buf = {};
1870 	struct bpf_sockopt_kern ctx = {
1871 		.sk = sk,
1872 		.level = level,
1873 		.optname = optname,
1874 		.current_task = current,
1875 	};
1876 	int ret;
1877 
1878 	ctx.optlen = max_optlen;
1879 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1880 	if (max_optlen < 0)
1881 		return max_optlen;
1882 
1883 	if (!retval) {
1884 		/* If the kernel getsockopt finished successfully,
1885 		 * copy whatever was returned to the user back
1886 		 * into our temporary buffer. Also set optlen to
1887 		 * the length the kernel returned so that BPF
1888 		 * programs can inspect the value.
1889 		 */
1890 
1891 		if (get_user(ctx.optlen, optlen)) {
1892 			ret = -EFAULT;
1893 			goto out;
1894 		}
1895 
1896 		if (ctx.optlen < 0) {
1897 			ret = -EFAULT;
1898 			goto out;
1899 		}
1900 
1901 		if (copy_from_user(ctx.optval, optval,
1902 				   min(ctx.optlen, max_optlen)) != 0) {
1903 			ret = -EFAULT;
1904 			goto out;
1905 		}
1906 	}
1907 
1908 	lock_sock(sk);
1909 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1910 				    &ctx, bpf_prog_run, retval, NULL);
1911 	release_sock(sk);
1912 
1913 	if (ret < 0)
1914 		goto out;
1915 
1916 	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1917 		ret = -EFAULT;
1918 		goto out;
1919 	}
1920 
1921 	if (ctx.optlen != 0) {
1922 		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1923 		    put_user(ctx.optlen, optlen)) {
1924 			ret = -EFAULT;
1925 			goto out;
1926 		}
1927 	}
1928 
1929 out:
1930 	sockopt_free_buf(&ctx, &buf);
1931 	return ret;
1932 }
1933 
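/* Variant of __cgroup_bpf_run_filter_getsockopt() for option values that
 * already live in kernel memory: no userspace copies are involved, the
 * programs operate directly on the caller's buffer, and they may shrink
 * optlen but never grow it beyond the caller-supplied *optlen.
 */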
1934 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
1935 					    int optname, void *optval,
1936 					    int *optlen, int retval)
1937 {
1938 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1939 	struct bpf_sockopt_kern ctx = {
1940 		.sk = sk,
1941 		.level = level,
1942 		.optname = optname,
1943 		.optlen = *optlen,
1944 		.optval = optval,
1945 		.optval_end = optval + *optlen,
1946 		.current_task = current,
1947 	};
1948 	int ret;
1949 
1950 	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
1951 	 * user data back into the BPF buffer when retval != 0. This is
1952 	 * done as an optimization to avoid an extra copy, assuming
1953 	 * the kernel won't populate the data in case of an error.
1954 	 * Here we always pass the data; memset() should be called
1955 	 * if that data shouldn't be "exported".
1956 	 */
1957 
1958 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1959 				    &ctx, bpf_prog_run, retval, NULL);
1960 	if (ret < 0)
1961 		return ret;
1962 
1963 	if (ctx.optlen > *optlen)
1964 		return -EFAULT;
1965 
1966 	/* BPF programs can shrink the buffer; export the modifications.
1967 	 */
1968 	if (ctx.optlen != 0)
1969 		*optlen = ctx.optlen;
1970 
1971 	return ret;
1972 }
1973 #endif
1974 
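/* Recursively copy "<dir>/" path components, root first, into *bufp
 * (e.g. "net/ipv4/" for a table under net.ipv4), advancing *bufp and
 * shrinking *lenp as it goes. Returns the number of bytes written
 * (excluding the NUL) or a negative strscpy() error on truncation.
 */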
1975 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1976 			      size_t *lenp)
1977 {
1978 	ssize_t tmp_ret = 0, ret;
1979 
1980 	if (dir->header.parent) {
1981 		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1982 		if (tmp_ret < 0)
1983 			return tmp_ret;
1984 	}
1985 
1986 	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1987 	if (ret < 0)
1988 		return ret;
1989 	*bufp += ret;
1990 	*lenp -= ret;
1991 	ret += tmp_ret;
1992 
1993 	/* Avoid leading slash. */
1994 	if (!ret)
1995 		return ret;
1996 
1997 	tmp_ret = strscpy(*bufp, "/", *lenp);
1998 	if (tmp_ret < 0)
1999 		return tmp_ret;
2000 	*bufp += tmp_ret;
2001 	*lenp -= tmp_ret;
2002 
2003 	return ret + tmp_ret;
2004 }
2005 
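/* bpf_sysctl_get_name() helper: copy the sysctl's name into buf, either
 * just the table entry's procname (with BPF_F_SYSCTL_BASE_NAME) or the
 * full "dir/subdir/name" path. Returns the copied length (excluding the
 * NUL) or a negative error such as -E2BIG on truncation.
 */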
2006 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
2007 	   size_t, buf_len, u64, flags)
2008 {
2009 	ssize_t tmp_ret = 0, ret;
2010 
2011 	if (!buf)
2012 		return -EINVAL;
2013 
2014 	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
2015 		if (!ctx->head)
2016 			return -EINVAL;
2017 		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
2018 		if (tmp_ret < 0)
2019 			return tmp_ret;
2020 	}
2021 
2022 	ret = strscpy(buf, ctx->table->procname, buf_len);
2023 
2024 	return ret < 0 ? ret : tmp_ret + ret;
2025 }
2026 
2027 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
2028 	.func		= bpf_sysctl_get_name,
2029 	.gpl_only	= false,
2030 	.ret_type	= RET_INTEGER,
2031 	.arg1_type	= ARG_PTR_TO_CTX,
2032 	.arg2_type	= ARG_PTR_TO_MEM,
2033 	.arg3_type	= ARG_CONST_SIZE,
2034 	.arg4_type	= ARG_ANYTHING,
2035 };
2036 
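/* Copy a sysctl value into a program-supplied buffer, NUL-padding or
 * NUL-terminating as needed. Returns src_len when the value fit, -E2BIG
 * when it had to be truncated (dst is still NUL-terminated), or -EINVAL
 * (with dst zeroed) when there is no source value.
 */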
2037 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
2038 			     size_t src_len)
2039 {
2040 	if (!dst)
2041 		return -EINVAL;
2042 
2043 	if (!dst_len)
2044 		return -E2BIG;
2045 
2046 	if (!src || !src_len) {
2047 		memset(dst, 0, dst_len);
2048 		return -EINVAL;
2049 	}
2050 
2051 	memcpy(dst, src, min(dst_len, src_len));
2052 
2053 	if (dst_len > src_len) {
2054 		memset(dst + src_len, '\0', dst_len - src_len);
2055 		return src_len;
2056 	}
2057 
2058 	dst[dst_len - 1] = '\0';
2059 
2060 	return -E2BIG;
2061 }
2062 
2063 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
2064 	   char *, buf, size_t, buf_len)
2065 {
2066 	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
2067 }
2068 
2069 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
2070 	.func		= bpf_sysctl_get_current_value,
2071 	.gpl_only	= false,
2072 	.ret_type	= RET_INTEGER,
2073 	.arg1_type	= ARG_PTR_TO_CTX,
2074 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2075 	.arg3_type	= ARG_CONST_SIZE,
2076 };
2077 
2078 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
2079 	   size_t, buf_len)
2080 {
2081 	if (!ctx->write) {
2082 		if (buf && buf_len)
2083 			memset(buf, '\0', buf_len);
2084 		return -EINVAL;
2085 	}
2086 	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
2087 }
2088 
2089 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
2090 	.func		= bpf_sysctl_get_new_value,
2091 	.gpl_only	= false,
2092 	.ret_type	= RET_INTEGER,
2093 	.arg1_type	= ARG_PTR_TO_CTX,
2094 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2095 	.arg3_type	= ARG_CONST_SIZE,
2096 };
2097 
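/* bpf_sysctl_set_new_value() helper: replace the value userspace is
 * writing. Only valid on the write path, and capped at PAGE_SIZE - 1
 * bytes. Setting new_updated is what makes __cgroup_bpf_run_filter_sysctl()
 * hand ctx->new_val to the sysctl core instead of freeing it.
 */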
2098 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
2099 	   const char *, buf, size_t, buf_len)
2100 {
2101 	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
2102 		return -EINVAL;
2103 
2104 	if (buf_len > PAGE_SIZE - 1)
2105 		return -E2BIG;
2106 
2107 	memcpy(ctx->new_val, buf, buf_len);
2108 	ctx->new_len = buf_len;
2109 	ctx->new_updated = 1;
2110 
2111 	return 0;
2112 }
2113 
2114 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
2115 	.func		= bpf_sysctl_set_new_value,
2116 	.gpl_only	= false,
2117 	.ret_type	= RET_INTEGER,
2118 	.arg1_type	= ARG_PTR_TO_CTX,
2119 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
2120 	.arg3_type	= ARG_CONST_SIZE,
2121 };
2122 
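/* A minimal sketch of how the helpers above combine on the BPF side (a
 * hypothetical cgroup/sysctl program, not part of this file; assumes
 * libbpf's bpf_helpers.h):
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 1;		// name too long: allow
 *		// Deny writes to everything under "kernel/".
 *		if (ctx->write && !__builtin_memcmp(name, "kernel/", 7))
 *			return 0;		// rejected with -EPERM
 *		return 1;
 *	}
 */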
2123 static const struct bpf_func_proto *
2124 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2125 {
2126 	const struct bpf_func_proto *func_proto;
2127 
2128 	func_proto = cgroup_common_func_proto(func_id, prog);
2129 	if (func_proto)
2130 		return func_proto;
2131 
2132 	func_proto = cgroup_current_func_proto(func_id, prog);
2133 	if (func_proto)
2134 		return func_proto;
2135 
2136 	switch (func_id) {
2137 	case BPF_FUNC_sysctl_get_name:
2138 		return &bpf_sysctl_get_name_proto;
2139 	case BPF_FUNC_sysctl_get_current_value:
2140 		return &bpf_sysctl_get_current_value_proto;
2141 	case BPF_FUNC_sysctl_get_new_value:
2142 		return &bpf_sysctl_get_new_value_proto;
2143 	case BPF_FUNC_sysctl_set_new_value:
2144 		return &bpf_sysctl_set_new_value_proto;
2145 	case BPF_FUNC_ktime_get_coarse_ns:
2146 		return &bpf_ktime_get_coarse_ns_proto;
2147 	case BPF_FUNC_perf_event_output:
2148 		return &bpf_event_output_data_proto;
2149 	default:
2150 		return bpf_base_func_proto(func_id);
2151 	}
2152 }
2153 
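/* struct bpf_sysctl exposes two fields: 'write' is read-only (narrow
 * loads allowed) and 'file_pos' is read/write, with writes required to
 * be full 32-bit stores.
 */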
2154 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
2155 				   const struct bpf_prog *prog,
2156 				   struct bpf_insn_access_aux *info)
2157 {
2158 	const int size_default = sizeof(__u32);
2159 
2160 	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
2161 		return false;
2162 
2163 	switch (off) {
2164 	case bpf_ctx_range(struct bpf_sysctl, write):
2165 		if (type != BPF_READ)
2166 			return false;
2167 		bpf_ctx_record_field_size(info, size_default);
2168 		return bpf_ctx_narrow_access_ok(off, size, size_default);
2169 	case bpf_ctx_range(struct bpf_sysctl, file_pos):
2170 		if (type == BPF_READ) {
2171 			bpf_ctx_record_field_size(info, size_default);
2172 			return bpf_ctx_narrow_access_ok(off, size, size_default);
2173 		} else {
2174 			return size == size_default;
2175 		}
2176 	default:
2177 		return false;
2178 	}
2179 }
2180 
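/* Rewrite loads/stores of struct bpf_sysctl fields into accesses to the
 * in-kernel struct bpf_sysctl_kern. file_pos is special: the kernel
 * struct holds a pointer (ppos), so the access becomes a pointer load
 * followed by an indirect load/store on the loff_t it points to.
 */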
2181 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
2182 				     const struct bpf_insn *si,
2183 				     struct bpf_insn *insn_buf,
2184 				     struct bpf_prog *prog, u32 *target_size)
2185 {
2186 	struct bpf_insn *insn = insn_buf;
2187 	u32 read_size;
2188 
2189 	switch (si->off) {
2190 	case offsetof(struct bpf_sysctl, write):
2191 		*insn++ = BPF_LDX_MEM(
2192 			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
2193 			bpf_target_off(struct bpf_sysctl_kern, write,
2194 				       sizeof_field(struct bpf_sysctl_kern,
2195 						    write),
2196 				       target_size));
2197 		break;
2198 	case offsetof(struct bpf_sysctl, file_pos):
2199 		/* ppos is a pointer, so it must be accessed via indirect
2200 		 * loads and stores. For stores, an additional temporary
2201 		 * register is used, since neither src_reg nor dst_reg
2202 		 * may be clobbered.
2203 		 */
2204 		if (type == BPF_WRITE) {
2205 			int treg = BPF_REG_9;
2206 
2207 			if (si->src_reg == treg || si->dst_reg == treg)
2208 				--treg;
2209 			if (si->src_reg == treg || si->dst_reg == treg)
2210 				--treg;
2211 			*insn++ = BPF_STX_MEM(
2212 				BPF_DW, si->dst_reg, treg,
2213 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2214 			*insn++ = BPF_LDX_MEM(
2215 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2216 				treg, si->dst_reg,
2217 				offsetof(struct bpf_sysctl_kern, ppos));
2218 			*insn++ = BPF_STX_MEM(
2219 				BPF_SIZEOF(u32), treg, si->src_reg,
2220 				bpf_ctx_narrow_access_offset(
2221 					0, sizeof(u32), sizeof(loff_t)));
2222 			*insn++ = BPF_LDX_MEM(
2223 				BPF_DW, treg, si->dst_reg,
2224 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2225 		} else {
2226 			*insn++ = BPF_LDX_MEM(
2227 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2228 				si->dst_reg, si->src_reg,
2229 				offsetof(struct bpf_sysctl_kern, ppos));
2230 			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
2231 			*insn++ = BPF_LDX_MEM(
2232 				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
2233 				bpf_ctx_narrow_access_offset(
2234 					0, read_size, sizeof(loff_t)));
2235 		}
2236 		*target_size = sizeof(u32);
2237 		break;
2238 	}
2239 
2240 	return insn - insn_buf;
2241 }
2242 
2243 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
2244 	.get_func_proto		= sysctl_func_proto,
2245 	.is_valid_access	= sysctl_is_valid_access,
2246 	.convert_ctx_access	= sysctl_convert_ctx_access,
2247 };
2248 
2249 const struct bpf_prog_ops cg_sysctl_prog_ops = {
2250 };
2251 
2252 #ifdef CONFIG_NET
2253 BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
2254 {
2255 	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
2256 
2257 	return net->net_cookie;
2258 }
2259 
2260 static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
2261 	.func		= bpf_get_netns_cookie_sockopt,
2262 	.gpl_only	= false,
2263 	.ret_type	= RET_INTEGER,
2264 	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
2265 };
2266 #endif
2267 
2268 static const struct bpf_func_proto *
2269 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2270 {
2271 	const struct bpf_func_proto *func_proto;
2272 
2273 	func_proto = cgroup_common_func_proto(func_id, prog);
2274 	if (func_proto)
2275 		return func_proto;
2276 
2277 	func_proto = cgroup_current_func_proto(func_id, prog);
2278 	if (func_proto)
2279 		return func_proto;
2280 
2281 	switch (func_id) {
2282 #ifdef CONFIG_NET
2283 	case BPF_FUNC_get_netns_cookie:
2284 		return &bpf_get_netns_cookie_sockopt_proto;
2285 	case BPF_FUNC_sk_storage_get:
2286 		return &bpf_sk_storage_get_proto;
2287 	case BPF_FUNC_sk_storage_delete:
2288 		return &bpf_sk_storage_delete_proto;
2289 	case BPF_FUNC_setsockopt:
2290 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2291 			return &bpf_sk_setsockopt_proto;
2292 		return NULL;
2293 	case BPF_FUNC_getsockopt:
2294 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2295 			return &bpf_sk_getsockopt_proto;
2296 		return NULL;
2297 #endif
2298 #ifdef CONFIG_INET
2299 	case BPF_FUNC_tcp_sock:
2300 		return &bpf_tcp_sock_proto;
2301 #endif
2302 	case BPF_FUNC_perf_event_output:
2303 		return &bpf_event_output_data_proto;
2304 	default:
2305 		return bpf_base_func_proto(func_id);
2306 	}
2307 }
2308 
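/* Verifier access rules for struct bpf_sockopt: retval is only
 * accessible to getsockopt programs, level/optname are writable only by
 * setsockopt programs, optlen is writable by both, and sk/optval/
 * optval_end are read-only 64-bit loads. optval and optval_end are typed
 * as packet pointers, so the verifier enforces bounds checks on every
 * access to the option value.
 */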
2309 static bool cg_sockopt_is_valid_access(int off, int size,
2310 				       enum bpf_access_type type,
2311 				       const struct bpf_prog *prog,
2312 				       struct bpf_insn_access_aux *info)
2313 {
2314 	const int size_default = sizeof(__u32);
2315 
2316 	if (off < 0 || off >= sizeof(struct bpf_sockopt))
2317 		return false;
2318 
2319 	if (off % size != 0)
2320 		return false;
2321 
2322 	if (type == BPF_WRITE) {
2323 		switch (off) {
2324 		case offsetof(struct bpf_sockopt, retval):
2325 			if (size != size_default)
2326 				return false;
2327 			return prog->expected_attach_type ==
2328 				BPF_CGROUP_GETSOCKOPT;
2329 		case offsetof(struct bpf_sockopt, optname):
2330 			fallthrough;
2331 		case offsetof(struct bpf_sockopt, level):
2332 			if (size != size_default)
2333 				return false;
2334 			return prog->expected_attach_type ==
2335 				BPF_CGROUP_SETSOCKOPT;
2336 		case offsetof(struct bpf_sockopt, optlen):
2337 			return size == size_default;
2338 		default:
2339 			return false;
2340 		}
2341 	}
2342 
2343 	switch (off) {
2344 	case offsetof(struct bpf_sockopt, sk):
2345 		if (size != sizeof(__u64))
2346 			return false;
2347 		info->reg_type = PTR_TO_SOCKET;
2348 		break;
2349 	case offsetof(struct bpf_sockopt, optval):
2350 		if (size != sizeof(__u64))
2351 			return false;
2352 		info->reg_type = PTR_TO_PACKET;
2353 		break;
2354 	case offsetof(struct bpf_sockopt, optval_end):
2355 		if (size != sizeof(__u64))
2356 			return false;
2357 		info->reg_type = PTR_TO_PACKET_END;
2358 		break;
2359 	case offsetof(struct bpf_sockopt, retval):
2360 		if (size != size_default)
2361 			return false;
2362 		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
2363 	default:
2364 		if (size != size_default)
2365 			return false;
2366 		break;
2367 	}
2368 	return true;
2369 }
2370 
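/* Emit a single load (BPF_LDX_MEM) or store (BPF_STX_MEM) of field F in
 * struct bpf_sockopt_kern, sized to match the field.
 */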
2371 #define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
2372 	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
2373 	  si->dst_reg, si->src_reg,					\
2374 	  offsetof(struct bpf_sockopt_kern, F))
2375 
2376 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2377 					 const struct bpf_insn *si,
2378 					 struct bpf_insn *insn_buf,
2379 					 struct bpf_prog *prog,
2380 					 u32 *target_size)
2381 {
2382 	struct bpf_insn *insn = insn_buf;
2383 
2384 	switch (si->off) {
2385 	case offsetof(struct bpf_sockopt, sk):
2386 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
2387 		break;
2388 	case offsetof(struct bpf_sockopt, level):
2389 		if (type == BPF_WRITE)
2390 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
2391 		else
2392 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
2393 		break;
2394 	case offsetof(struct bpf_sockopt, optname):
2395 		if (type == BPF_WRITE)
2396 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
2397 		else
2398 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
2399 		break;
2400 	case offsetof(struct bpf_sockopt, optlen):
2401 		if (type == BPF_WRITE)
2402 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
2403 		else
2404 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
2405 		break;
2406 	case offsetof(struct bpf_sockopt, retval):
2407 		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2408 
2409 		if (type == BPF_WRITE) {
2410 			int treg = BPF_REG_9;
2411 
2412 			if (si->src_reg == treg || si->dst_reg == treg)
2413 				--treg;
2414 			if (si->src_reg == treg || si->dst_reg == treg)
2415 				--treg;
2416 			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2417 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2418 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2419 					      treg, si->dst_reg,
2420 					      offsetof(struct bpf_sockopt_kern, current_task));
2421 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2422 					      treg, treg,
2423 					      offsetof(struct task_struct, bpf_ctx));
2424 			*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2425 					      treg, si->src_reg,
2426 					      offsetof(struct bpf_cg_run_ctx, retval));
2427 			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2428 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2429 		} else {
2430 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2431 					      si->dst_reg, si->src_reg,
2432 					      offsetof(struct bpf_sockopt_kern, current_task));
2433 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2434 					      si->dst_reg, si->dst_reg,
2435 					      offsetof(struct task_struct, bpf_ctx));
2436 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2437 					      si->dst_reg, si->dst_reg,
2438 					      offsetof(struct bpf_cg_run_ctx, retval));
2439 		}
2440 		break;
2441 	case offsetof(struct bpf_sockopt, optval):
2442 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
2443 		break;
2444 	case offsetof(struct bpf_sockopt, optval_end):
2445 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
2446 		break;
2447 	}
2448 
2449 	return insn - insn_buf;
2450 }
2451 
2452 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2453 				   bool direct_write,
2454 				   const struct bpf_prog *prog)
2455 {
2456 	/* Nothing to do for the sockopt argument. The optval buffer is
2457 	 * already zero-initialized (zeroed on-stack buf or kzalloc). */
2458 	return 0;
2459 }
2460 
2461 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2462 	.get_func_proto		= cg_sockopt_func_proto,
2463 	.is_valid_access	= cg_sockopt_is_valid_access,
2464 	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
2465 	.gen_prologue		= cg_sockopt_get_prologue,
2466 };
2467 
2468 const struct bpf_prog_ops cg_sockopt_prog_ops = {
2469 };
2470 
2471 /* Common helpers for cgroup hooks. */
2472 const struct bpf_func_proto *
2473 cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2474 {
2475 	switch (func_id) {
2476 	case BPF_FUNC_get_local_storage:
2477 		return &bpf_get_local_storage_proto;
2478 	case BPF_FUNC_get_retval:
2479 		switch (prog->expected_attach_type) {
2480 		case BPF_CGROUP_INET_INGRESS:
2481 		case BPF_CGROUP_INET_EGRESS:
2482 		case BPF_CGROUP_SOCK_OPS:
2483 		case BPF_CGROUP_UDP4_RECVMSG:
2484 		case BPF_CGROUP_UDP6_RECVMSG:
2485 		case BPF_CGROUP_INET4_GETPEERNAME:
2486 		case BPF_CGROUP_INET6_GETPEERNAME:
2487 		case BPF_CGROUP_INET4_GETSOCKNAME:
2488 		case BPF_CGROUP_INET6_GETSOCKNAME:
2489 			return NULL;
2490 		default:
2491 			return &bpf_get_retval_proto;
2492 		}
2493 	case BPF_FUNC_set_retval:
2494 		switch (prog->expected_attach_type) {
2495 		case BPF_CGROUP_INET_INGRESS:
2496 		case BPF_CGROUP_INET_EGRESS:
2497 		case BPF_CGROUP_SOCK_OPS:
2498 		case BPF_CGROUP_UDP4_RECVMSG:
2499 		case BPF_CGROUP_UDP6_RECVMSG:
2500 		case BPF_CGROUP_INET4_GETPEERNAME:
2501 		case BPF_CGROUP_INET6_GETPEERNAME:
2502 		case BPF_CGROUP_INET4_GETSOCKNAME:
2503 		case BPF_CGROUP_INET6_GETSOCKNAME:
2504 			return NULL;
2505 		default:
2506 			return &bpf_set_retval_proto;
2507 		}
2508 	default:
2509 		return NULL;
2510 	}
2511 }
2512 
2513 /* Common helpers for cgroup hooks with valid process context. */
2514 const struct bpf_func_proto *
2515 cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2516 {
2517 	switch (func_id) {
2518 	case BPF_FUNC_get_current_uid_gid:
2519 		return &bpf_get_current_uid_gid_proto;
2520 	case BPF_FUNC_get_current_pid_tgid:
2521 		return &bpf_get_current_pid_tgid_proto;
2522 	case BPF_FUNC_get_current_comm:
2523 		return &bpf_get_current_comm_proto;
2524 	case BPF_FUNC_get_current_cgroup_id:
2525 		return &bpf_get_current_cgroup_id_proto;
2526 	case BPF_FUNC_get_current_ancestor_cgroup_id:
2527 		return &bpf_get_current_ancestor_cgroup_id_proto;
2528 #ifdef CONFIG_CGROUP_NET_CLASSID
2529 	case BPF_FUNC_get_cgroup_classid:
2530 		return &bpf_get_cgroup_classid_curr_proto;
2531 #endif
2532 	default:
2533 		return NULL;
2534 	}
2535 }
2536