xref: /openbmc/linux/kernel/bpf/cgroup.c (revision 54a611b6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_lsm.h>
#include <linux/bpf_verifier.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

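/* Run all effective programs of the given attach type for this cgroup.
 * Programs run in effective-array order: the cgroup's own programs come
 * before its ancestors' (see compute_effective_progs()). A program that
 * returns 0 turns the propagated retval into -EPERM, unless an earlier
 * program already stored an error. When @ret_flags is non-NULL, bit 0 of
 * each return value is the verdict and the remaining bits are OR'ed into
 * *ret_flags (used by CGROUP_INET_EGRESS).
 */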
/* __always_inline is necessary to prevent indirect call through run_prog
 * function pointer.
 */
static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
		      enum cgroup_bpf_attach_type atype,
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(cgrp->effective[atype]);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		if (ret_flags) {
			*(ret_flags) |= (func_ret >> 1);
			func_ret &= 1;
		}
		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}

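/* Entry points called from the BPF trampoline for BPF_LSM_CGROUP shim
 * programs. @insn points into the shim program's insnsi[] array, so the
 * shim bpf_prog (and with it aux->cgroup_atype) is recovered by undoing
 * offsetof(struct bpf_prog, insnsi) - an open-coded equivalent of the
 * container_of() shown in the commented-out line below. The LSM hook
 * arguments arrive as a u64 array in @ctx; args[0] is the sock/socket
 * being mediated in the _sock/_socket variants.
 */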
unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct sock *sk;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sk = (void *)(unsigned long)args[0];
	/* shim_prog = container_of(insn, struct bpf_prog, insnsi); */
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct socket *sock;
	struct cgroup *cgrp;
	int ret = 0;
	u64 *args;

	args = (u64 *)ctx;
	sock = (void *)(unsigned long)args[0];
	/* shim_prog = container_of(insn, struct bpf_prog, insnsi); */
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct cgroup *cgrp;
	int ret = 0;

	/* shim_prog = container_of(insn, struct bpf_prog, insnsi); */
	shim_prog = (const struct bpf_prog *)((void *)insn - offsetof(struct bpf_prog, insnsi));

	/* We rely on trampoline's __bpf_prog_enter_lsm_cgroup to grab RCU read lock. */
	cgrp = task_dfl_cgroup(current);
	if (likely(cgrp))
		ret = bpf_prog_run_array_cg(&cgrp->bpf,
					    shim_prog->aux->cgroup_atype,
					    ctx, bpf_prog_run, 0, NULL);
	return ret;
}

#ifdef CONFIG_BPF_LSM
struct cgroup_lsm_atype {
	u32 attach_btf_id;
	int refcnt;
};

static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];

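/* Map an attach type (plus, for BPF_LSM_CGROUP, the target LSM hook's BTF
 * ID) to a slot in cgroup_bpf's per-attach-type arrays. LSM hooks share
 * the CGROUP_LSM_START..CGROUP_LSM_END slot range: an existing slot for
 * the same attach_btf_id is reused, otherwise a free slot is picked, and
 * -E2BIG is returned once all CGROUP_LSM_NUM slots are taken.
 */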
static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	int i;

	lockdep_assert_held(&cgroup_mutex);

	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
			return CGROUP_LSM_START + i;

	for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
		if (cgroup_lsm_atype[i].attach_btf_id == 0)
			return CGROUP_LSM_START + i;

	return -E2BIG;
}

void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	lockdep_assert_held(&cgroup_mutex);

	WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
		     cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);

	cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
	cgroup_lsm_atype[i].refcnt++;
}

void bpf_cgroup_atype_put(int cgroup_atype)
{
	int i = cgroup_atype - CGROUP_LSM_START;

	mutex_lock(&cgroup_mutex);
	if (--cgroup_lsm_atype[i].refcnt <= 0)
		cgroup_lsm_atype[i].attach_btf_id = 0;
	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
	mutex_unlock(&cgroup_mutex);
}
#else
static enum cgroup_bpf_attach_type
bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
{
	if (attach_type != BPF_LSM_CGROUP)
		return to_cgroup_bpf_attach_type(attach_type);
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_LSM */

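/* Called from the cgroup destruction path: pin the cgroup until the
 * release work has run, then kill the percpu refcount so that
 * cgroup_bpf_release_fn() is invoked once all in-flight program runs
 * have drained.
 */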
void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct hlist_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl;
		struct hlist_node *pltmp;

		hlist_for_each_entry_safe(pl, pltmp, progs, node) {
			hlist_del(&pl->node);
			if (pl->prog) {
				if (pl->prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->prog);
				bpf_prog_put(pl->prog);
			}
			if (pl->link) {
				if (pl->link->link.prog->expected_attach_type == BPF_LSM_CGROUP)
					bpf_trampoline_unlink_cgroup_shim(pl->link->link.prog);
				bpf_cgroup_link_auto_detach(pl->link);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* Count the number of elements in the list.
 * It's slow, but the list cannot be long.
 */
static u32 prog_list_length(struct hlist_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	hlist_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* If a parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * If a parent has an overridable or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

/* Compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		hlist_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might still be walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* We have to use a macro instead of a const int, since the compiler
 * thinks the array below is variable length.
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_HLIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

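/* Recompute the effective program arrays for @cgrp and every live
 * descendant after an attach or detach. Done in two passes so the update
 * is all-or-nothing: first allocate all new arrays into bpf.inactive,
 * then, only if every allocation succeeded, activate them all.
 */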
static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* OOM while computing the effective arrays. Free all arrays computed
	 * so far, since none of them were activated.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

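/* Locate the bpf_prog_list entry an attach operation should use.
 * Returns NULL when a new entry must be allocated, a valid entry when an
 * existing one is being replaced (single-attach mode, or BPF_F_REPLACE),
 * and ERR_PTR on invalid combinations such as attaching the same prog or
 * link twice.
 */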
static struct bpf_prog_list *find_attach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (hlist_empty(progs))
			return NULL;
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	hlist_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		hlist_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @link: A link to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog *new_prog = prog ? : link->link.prog;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = bpf_cgroup_atype_find(type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		struct hlist_node *last = NULL;

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		if (hlist_empty(progs))
			hlist_add_head(&pl->node, progs);
		else
			hlist_for_each(last, progs) {
				if (last->next)
					continue;
				hlist_add_behind(&pl->node, last);
				break;
			}
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	if (type == BPF_LSM_CGROUP) {
		err = bpf_trampoline_link_cgroup_shim(new_prog, atype);
		if (err)
			goto cleanup;
	}

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup_trampoline;

	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	} else {
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	}
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup_trampoline:
	if (type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(new_prog);

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		hlist_del(&pl->node);
		kfree(pl);
	}
	return err;
}

static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: New BPF program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	bool found = false;

	atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	hlist_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

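/* Locate the bpf_prog_list entry a detach operation should remove.
 * In single-attach (no BPF_F_ALLOW_MULTI) mode the first entry is
 * returned, which keeps legacy detach-with-invalid-FD working; in
 * multi-attach mode the exact prog or link must be specified.
 */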
static struct bpf_prog_list *find_detach_entry(struct hlist_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (hlist_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return hlist_entry(progs->first, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	hlist_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct hlist_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			hlist_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct hlist_head *progs;
	u32 attach_btf_id = 0;
	u32 flags;

	if (prog)
		attach_btf_id = prog->aux->attach_btf_id;
	if (link)
		attach_btf_id = link->link.prog->aux->attach_btf_id;

	atype = bpf_cgroup_atype_find(type, attach_btf_id);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if updating the effective arrays failed, restore the
		 * prog/link and have purge_effective_progs() replace the
		 * prog with a dummy prog in place
		 */
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	hlist_del(&pl->node);

	kfree(pl);
	if (hlist_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog) {
		if (type == BPF_LSM_CGROUP)
			bpf_trampoline_unlink_cgroup_shim(old_prog);
		bpf_prog_put(old_prog);
	}
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}

static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type from_atype, to_atype;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;
	int total_cnt = 0;
	u32 flags;

	if (type == BPF_LSM_CGROUP) {
		if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
			return -EINVAL;

		from_atype = CGROUP_LSM_START;
		to_atype = CGROUP_LSM_END;
		flags = 0;
	} else {
		from_atype = to_cgroup_bpf_attach_type(type);
		if (from_atype < 0)
			return -EINVAL;
		to_atype = from_atype;
		flags = cgrp->bpf.flags[from_atype];
	}

	for (atype = from_atype; atype <= to_atype; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			total_cnt += bpf_prog_array_length(effective);
		} else {
			total_cnt += prog_list_length(&cgrp->bpf.progs[atype]);
		}
	}

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
		/* return early if user requested only program count + flags */
		return 0;

	if (attr->query.prog_cnt < total_cnt) {
		total_cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
		} else {
			struct hlist_head *progs;
			struct bpf_prog_list *pl;
			struct bpf_prog *prog;
			u32 id;

			progs = &cgrp->bpf.progs[atype];
			cnt = min_t(int, prog_list_length(progs), total_cnt);
			i = 0;
			hlist_for_each_entry(pl, progs, node) {
				prog = prog_list_prog(pl);
				id = prog->aux->id;
				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
					return -EFAULT;
				if (++i == cnt)
					break;
			}
		}

		if (prog_attach_flags) {
			flags = cgrp->bpf.flags[atype];

			for (i = 0; i < cnt; i++)
				if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
					return -EFAULT;
			prog_attach_flags += cnt;
		}

		prog_ids += cnt;
		total_cnt -= cnt;
	}
	return ret;
}

static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

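/* Syscall-side attach entry point (BPF_PROG_ATTACH): resolves the target
 * cgroup from attr->target_fd and, when BPF_F_REPLACE is combined with
 * BPF_F_ALLOW_MULTI, the program to replace from attr->replace_bpf_fd.
 */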
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));
	if (cg_link->type == BPF_LSM_CGROUP)
		bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog);

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

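/* Syscall-side BPF_LINK_CREATE entry point for cgroup links. The link is
 * primed before attaching so a failed attach can be undone with
 * bpf_link_cleanup(), and its FD is only exposed via bpf_link_settle()
 * once the attach has succeeded. Links always attach with
 * BPF_F_ALLOW_MULTI.
 */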
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -err			- drop packet
 *
 * For ingress packets, this function will return %-EPERM if an attached
 * program was found and it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		u32 flags = 0;
		bool cn;

		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
					    __bpf_prog_run_save_cb, 0, &flags);

		/* Return values of CGROUP EGRESS BPF programs are:
		 *   0: drop packet
		 *   1: keep packet
		 *   2: drop packet and cn
		 *   3: keep packet and cn
		 *
		 * The returned value is then converted to one of the NET_XMIT
		 * or an error code that is then interpreted as drop packet
		 * (and no cn):
		 *   0: NET_XMIT_SUCCESS  skb should be transmitted
		 *   1: NET_XMIT_DROP     skb should be dropped and cn
		 *   2: NET_XMIT_CN       skb should be transmitted and cn
		 *   3: -err              skb should be dropped
		 */

		cn = flags & BPF_RET_SET_CN;
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
		if (!ret)
			ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);
		else
			ret = (cn ? NET_XMIT_DROP : ret);
	} else {
		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
					    skb, __bpf_prog_run_save_cb, 0,
					    NULL);
		if (ret && !IS_ERR_VALUE((long)ret))
			ret = -EFAULT;
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
				     NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
				     0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
				     0, NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int ret;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	return ret;
}

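/* The bpf_get_retval()/bpf_set_retval() helpers let cgroup programs read
 * and override the value propagated through bpf_cg_run_ctx::retval, i.e.
 * what bpf_prog_run_array_cg() ultimately returns to the kernel caller.
 */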
BPF_CALL_0(bpf_get_retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	return ctx->retval;
}

const struct bpf_func_proto bpf_get_retval_proto = {
	.func		= bpf_get_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_set_retval, int, retval)
{
	struct bpf_cg_run_ctx *ctx =
		container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

	ctx->retval = retval;
	return 0;
}

const struct bpf_func_proto bpf_set_retval_proto = {
	.func		= bpf_set_retval,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_get_retval:
		return &bpf_get_retval_proto;
	case BPF_FUNC_set_retval:
		return &bpf_set_retval_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
				    NULL);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret;
}

#ifdef CONFIG_NET
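/* Size and set up the optval buffer exposed to get/setsockopt programs:
 * the value is clamped to PAGE_SIZE, served from the on-stack
 * bpf_sockopt_buf when it fits, and kzalloc'ed otherwise. Returns the
 * usable length or a negative errno.
 */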
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes, avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}

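/* Run CGROUP_SETSOCKOPT programs before the kernel setsockopt handler.
 * Returns 1 if the programs consumed the option (optlen set to -1, kernel
 * handler bypassed), 0 to continue into the kernel handler (possibly with
 * a modified value exported via *kernel_optval), or a negative error.
 */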
int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);
	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
				    &ctx, bpf_prog_run, 0, NULL);
	release_sock(sk);

	if (ret)
		goto out;

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

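/* Run CGROUP_GETSOCKOPT programs after the kernel getsockopt handler.
 * @retval carries the kernel handler's result; on success the returned
 * value is copied into the BPF buffer for inspection, and any
 * modification the programs make is copied back to user space.
 */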
1836 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1837 				       int optname, char __user *optval,
1838 				       int __user *optlen, int max_optlen,
1839 				       int retval)
1840 {
1841 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1842 	struct bpf_sockopt_buf buf = {};
1843 	struct bpf_sockopt_kern ctx = {
1844 		.sk = sk,
1845 		.level = level,
1846 		.optname = optname,
1847 		.current_task = current,
1848 	};
1849 	int ret;
1850 
1851 	ctx.optlen = max_optlen;
1852 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1853 	if (max_optlen < 0)
1854 		return max_optlen;
1855 
1856 	if (!retval) {
1857 		/* If the kernel getsockopt finished successfully,
1858 		 * copy whatever was returned to the user back
1859 		 * into our temporary buffer. Also set optlen to
1860 		 * the value the kernel returned so that BPF
1861 		 * programs can inspect it.
1862 		 */
1863 
1864 		if (get_user(ctx.optlen, optlen)) {
1865 			ret = -EFAULT;
1866 			goto out;
1867 		}
1868 
1869 		if (ctx.optlen < 0) {
1870 			ret = -EFAULT;
1871 			goto out;
1872 		}
1873 
1874 		if (copy_from_user(ctx.optval, optval,
1875 				   min(ctx.optlen, max_optlen)) != 0) {
1876 			ret = -EFAULT;
1877 			goto out;
1878 		}
1879 	}
1880 
1881 	lock_sock(sk);
1882 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1883 				    &ctx, bpf_prog_run, retval, NULL);
1884 	release_sock(sk);
1885 
1886 	if (ret < 0)
1887 		goto out;
1888 
1889 	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1890 		ret = -EFAULT;
1891 		goto out;
1892 	}
1893 
1894 	if (ctx.optlen != 0) {
1895 		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1896 		    put_user(ctx.optlen, optlen)) {
1897 			ret = -EFAULT;
1898 			goto out;
1899 		}
1900 	}
1901 
1902 out:
1903 	sockopt_free_buf(&ctx, &buf);
1904 	return ret;
1905 }
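
/* Illustrative cgroup/getsockopt counterpart (sketch, with the same
 * header assumptions as above). On entry ctx->retval holds the kernel
 * getsockopt() result and ctx->optval whatever it returned:
 *
 *	SEC("cgroup/getsockopt")
 *	int sanitize_tos(struct bpf_sockopt *ctx)
 *	{
 *		__u8 *optval = ctx->optval;
 *
 *		if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
 *			return 1;		// leave the result untouched
 *		if (ctx->retval)
 *			return 1;		// kernel failed; pass through
 *		if (optval + 1 > (__u8 *)ctx->optval_end)
 *			return 0;		// reject: caller sees -EPERM
 *		optval[0] &= ~1;		// mask a bit before export
 *		return 1;
 *	}
 */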
1906 
1907 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
1908 					    int optname, void *optval,
1909 					    int *optlen, int retval)
1910 {
1911 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1912 	struct bpf_sockopt_kern ctx = {
1913 		.sk = sk,
1914 		.level = level,
1915 		.optname = optname,
1916 		.optlen = *optlen,
1917 		.optval = optval,
1918 		.optval_end = optval + *optlen,
1919 		.current_task = current,
1920 	};
1921 	int ret;
1922 
1923 	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
1924 	 * user data back into the BPF buffer when retval != 0. This is
1925 	 * done as an optimization to avoid an extra copy, assuming the
1926 	 * kernel won't populate the data in case of an error.
1927 	 * Here we always pass the data; memset() should be
1928 	 * called if that data shouldn't be "exported".
1929 	 */
1930 
1931 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1932 				    &ctx, bpf_prog_run, retval, NULL);
1933 	if (ret < 0)
1934 		return ret;
1935 
1936 	if (ctx.optlen > *optlen)
1937 		return -EFAULT;
1938 
1939 	/* BPF programs can shrink the buffer; export the modifications.
1940 	 */
1941 	if (ctx.optlen != 0)
1942 		*optlen = ctx.optlen;
1943 
1944 	return ret;
1945 }
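
/* This _kern flavour serves optvals that already sit in kernel memory
 * (e.g. it is reached via BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN on the
 * TCP_ZEROCOPY_RECEIVE fast path); there is no user copying and no
 * temporary buffer here, hence the memset() advice above.
 */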
1946 #endif
1947 
1948 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1949 			      size_t *lenp)
1950 {
1951 	ssize_t tmp_ret = 0, ret;
1952 
1953 	if (dir->header.parent) {
1954 		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1955 		if (tmp_ret < 0)
1956 			return tmp_ret;
1957 	}
1958 
1959 	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1960 	if (ret < 0)
1961 		return ret;
1962 	*bufp += ret;
1963 	*lenp -= ret;
1964 	ret += tmp_ret;
1965 
1966 	/* Avoid leading slash. */
1967 	if (!ret)
1968 		return ret;
1969 
1970 	tmp_ret = strscpy(*bufp, "/", *lenp);
1971 	if (tmp_ret < 0)
1972 		return tmp_ret;
1973 	*bufp += tmp_ret;
1974 	*lenp -= tmp_ret;
1975 
1976 	return ret + tmp_ret;
1977 }
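
/* Worked example (illustrative): for the table behind
 * /proc/sys/net/ipv4/tcp_mem, the recursion above emits the outermost
 * parent first, leaving "net/ipv4/" in the buffer; the return value is
 * the total length written. The !ret check skips the separator after
 * the nameless root directory, avoiding a leading '/'.
 */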
1978 
1979 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1980 	   size_t, buf_len, u64, flags)
1981 {
1982 	ssize_t tmp_ret = 0, ret;
1983 
1984 	if (!buf)
1985 		return -EINVAL;
1986 
1987 	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1988 		if (!ctx->head)
1989 			return -EINVAL;
1990 		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1991 		if (tmp_ret < 0)
1992 			return tmp_ret;
1993 	}
1994 
1995 	ret = strscpy(buf, ctx->table->procname, buf_len);
1996 
1997 	return ret < 0 ? ret : tmp_ret + ret;
1998 }
1999 
2000 static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
2001 	.func		= bpf_sysctl_get_name,
2002 	.gpl_only	= false,
2003 	.ret_type	= RET_INTEGER,
2004 	.arg1_type	= ARG_PTR_TO_CTX,
2005 	.arg2_type	= ARG_PTR_TO_MEM,
2006 	.arg3_type	= ARG_CONST_SIZE,
2007 	.arg4_type	= ARG_ANYTHING,
2008 };
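
/* Illustrative use from a cgroup/sysctl program (sketch; SEC() comes
 * from libbpf as usual, and with BPF_F_SYSCTL_BASE_NAME only the last
 * component, e.g. "tcp_mem", would be copied):
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_logger(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 1;	// name truncated; allow anyway
 *		// e.g. name == "net/ipv4/tcp_mem"
 *		return 1;		// allow the access
 *	}
 */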
2009 
2010 static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
2011 			     size_t src_len)
2012 {
2013 	if (!dst)
2014 		return -EINVAL;
2015 
2016 	if (!dst_len)
2017 		return -E2BIG;
2018 
2019 	if (!src || !src_len) {
2020 		memset(dst, 0, dst_len);
2021 		return -EINVAL;
2022 	}
2023 
2024 	memcpy(dst, src, min(dst_len, src_len));
2025 
2026 	if (dst_len > src_len) {
2027 		memset(dst + src_len, '\0', dst_len - src_len);
2028 		return src_len;
2029 	}
2030 
2031 	dst[dst_len - 1] = '\0';
2032 
2033 	return -E2BIG;
2034 }
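
/* Worked example (illustrative) of the semantics above, with
 * src = "512\n" and src_len = 4:
 *
 *	dst_len = 8: dst = "512\n\0\0\0\0", returns 4 (src_len)
 *	dst_len = 4: dst = "512\0" (truncated but still NUL
 *		     terminated), returns -E2BIG
 *	dst_len = 2: dst = "5\0", returns -E2BIG
 */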
2035 
2036 BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
2037 	   char *, buf, size_t, buf_len)
2038 {
2039 	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
2040 }
2041 
2042 static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
2043 	.func		= bpf_sysctl_get_current_value,
2044 	.gpl_only	= false,
2045 	.ret_type	= RET_INTEGER,
2046 	.arg1_type	= ARG_PTR_TO_CTX,
2047 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2048 	.arg3_type	= ARG_CONST_SIZE,
2049 };
2050 
2051 BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
2052 	   size_t, buf_len)
2053 {
2054 	if (!ctx->write) {
2055 		if (buf && buf_len)
2056 			memset(buf, '\0', buf_len);
2057 		return -EINVAL;
2058 	}
2059 	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
2060 }
2061 
2062 static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
2063 	.func		= bpf_sysctl_get_new_value,
2064 	.gpl_only	= false,
2065 	.ret_type	= RET_INTEGER,
2066 	.arg1_type	= ARG_PTR_TO_CTX,
2067 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
2068 	.arg3_type	= ARG_CONST_SIZE,
2069 };
2070 
2071 BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
2072 	   const char *, buf, size_t, buf_len)
2073 {
2074 	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
2075 		return -EINVAL;
2076 
2077 	if (buf_len > PAGE_SIZE - 1)
2078 		return -E2BIG;
2079 
2080 	memcpy(ctx->new_val, buf, buf_len);
2081 	ctx->new_len = buf_len;
2082 	ctx->new_updated = 1;
2083 
2084 	return 0;
2085 }
2086 
2087 static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
2088 	.func		= bpf_sysctl_set_new_value,
2089 	.gpl_only	= false,
2090 	.ret_type	= RET_INTEGER,
2091 	.arg1_type	= ARG_PTR_TO_CTX,
2092 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
2093 	.arg3_type	= ARG_CONST_SIZE,
2094 };
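
/* Illustrative cgroup/sysctl program combining the value helpers above
 * (sketch, same libbpf assumptions; the limit of 1000 is arbitrary):
 * clamp values written to a sysctl.
 *
 *	SEC("cgroup/sysctl")
 *	int clamp_write(struct bpf_sysctl *ctx)
 *	{
 *		char val[16] = {};
 *		unsigned long v;
 *
 *		if (!ctx->write)
 *			return 1;	// reads pass through
 *		if (bpf_sysctl_get_new_value(ctx, val, sizeof(val)) < 0)
 *			return 0;	// reject with -EPERM
 *		if (bpf_strtoul(val, sizeof(val), 0, &v) < 0)
 *			return 0;
 *		if (v > 1000)
 *			bpf_sysctl_set_new_value(ctx, "1000\n", 5);
 *		return 1;
 *	}
 */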
2095 
2096 static const struct bpf_func_proto *
2097 sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2098 {
2099 	switch (func_id) {
2100 	case BPF_FUNC_strtol:
2101 		return &bpf_strtol_proto;
2102 	case BPF_FUNC_strtoul:
2103 		return &bpf_strtoul_proto;
2104 	case BPF_FUNC_sysctl_get_name:
2105 		return &bpf_sysctl_get_name_proto;
2106 	case BPF_FUNC_sysctl_get_current_value:
2107 		return &bpf_sysctl_get_current_value_proto;
2108 	case BPF_FUNC_sysctl_get_new_value:
2109 		return &bpf_sysctl_get_new_value_proto;
2110 	case BPF_FUNC_sysctl_set_new_value:
2111 		return &bpf_sysctl_set_new_value_proto;
2112 	case BPF_FUNC_ktime_get_coarse_ns:
2113 		return &bpf_ktime_get_coarse_ns_proto;
2114 	default:
2115 		return cgroup_base_func_proto(func_id, prog);
2116 	}
2117 }
2118 
2119 static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
2120 				   const struct bpf_prog *prog,
2121 				   struct bpf_insn_access_aux *info)
2122 {
2123 	const int size_default = sizeof(__u32);
2124 
2125 	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
2126 		return false;
2127 
2128 	switch (off) {
2129 	case bpf_ctx_range(struct bpf_sysctl, write):
2130 		if (type != BPF_READ)
2131 			return false;
2132 		bpf_ctx_record_field_size(info, size_default);
2133 		return bpf_ctx_narrow_access_ok(off, size, size_default);
2134 	case bpf_ctx_range(struct bpf_sysctl, file_pos):
2135 		if (type == BPF_READ) {
2136 			bpf_ctx_record_field_size(info, size_default);
2137 			return bpf_ctx_narrow_access_ok(off, size, size_default);
2138 		} else {
2139 			return size == size_default;
2140 		}
2141 	default:
2142 		return false;
2143 	}
2144 }
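
/* In practice (illustrative): loads of ctx->write may be narrower than
 * the full __u32 (the verifier rewrites them using the recorded field
 * size), while a store to ctx->file_pos must be exactly 4 bytes wide;
 * any other combination is rejected at program load time.
 */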
2145 
2146 static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
2147 				     const struct bpf_insn *si,
2148 				     struct bpf_insn *insn_buf,
2149 				     struct bpf_prog *prog, u32 *target_size)
2150 {
2151 	struct bpf_insn *insn = insn_buf;
2152 	u32 read_size;
2153 
2154 	switch (si->off) {
2155 	case offsetof(struct bpf_sysctl, write):
2156 		*insn++ = BPF_LDX_MEM(
2157 			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
2158 			bpf_target_off(struct bpf_sysctl_kern, write,
2159 				       sizeof_field(struct bpf_sysctl_kern,
2160 						    write),
2161 				       target_size));
2162 		break;
2163 	case offsetof(struct bpf_sysctl, file_pos):
2164 		/* ppos is a pointer, so it must be accessed via indirect
2165 		 * loads and stores. Also, for stores, an additional
2166 		 * temporary register is used, since neither src_reg nor
2167 		 * dst_reg can be overwritten.
2168 		 */
2169 		if (type == BPF_WRITE) {
2170 			int treg = BPF_REG_9;
2171 
2172 			if (si->src_reg == treg || si->dst_reg == treg)
2173 				--treg;
2174 			if (si->src_reg == treg || si->dst_reg == treg)
2175 				--treg;
2176 			*insn++ = BPF_STX_MEM(
2177 				BPF_DW, si->dst_reg, treg,
2178 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2179 			*insn++ = BPF_LDX_MEM(
2180 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2181 				treg, si->dst_reg,
2182 				offsetof(struct bpf_sysctl_kern, ppos));
2183 			*insn++ = BPF_STX_MEM(
2184 				BPF_SIZEOF(u32), treg, si->src_reg,
2185 				bpf_ctx_narrow_access_offset(
2186 					0, sizeof(u32), sizeof(loff_t)));
2187 			*insn++ = BPF_LDX_MEM(
2188 				BPF_DW, treg, si->dst_reg,
2189 				offsetof(struct bpf_sysctl_kern, tmp_reg));
2190 		} else {
2191 			*insn++ = BPF_LDX_MEM(
2192 				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
2193 				si->dst_reg, si->src_reg,
2194 				offsetof(struct bpf_sysctl_kern, ppos));
2195 			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
2196 			*insn++ = BPF_LDX_MEM(
2197 				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
2198 				bpf_ctx_narrow_access_offset(
2199 					0, read_size, sizeof(loff_t)));
2200 		}
2201 		*target_size = sizeof(u32);
2202 		break;
2203 	}
2204 
2205 	return insn - insn_buf;
2206 }
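
/* Rough C equivalent of the file_pos lowering above (illustrative):
 * a read becomes "val = *(u32 *)ctx->ppos" via two loads, and a write
 * becomes "*(u32 *)ctx->ppos = val", with treg spilled to tmp_reg
 * around the store since no scratch register is otherwise available.
 * A cgroup/sysctl program can thus rewind an access with
 * "ctx->file_pos = 0;".
 */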
2207 
2208 const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
2209 	.get_func_proto		= sysctl_func_proto,
2210 	.is_valid_access	= sysctl_is_valid_access,
2211 	.convert_ctx_access	= sysctl_convert_ctx_access,
2212 };
2213 
2214 const struct bpf_prog_ops cg_sysctl_prog_ops = {
2215 };
2216 
2217 #ifdef CONFIG_NET
2218 BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
2219 {
2220 	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
2221 
2222 	return net->net_cookie;
2223 }
2224 
2225 static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
2226 	.func		= bpf_get_netns_cookie_sockopt,
2227 	.gpl_only	= false,
2228 	.ret_type	= RET_INTEGER,
2229 	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
2230 };
2231 #endif
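
/* Illustrative use (sketch, same libbpf assumptions; EXPECTED_COOKIE
 * is a hypothetical constant): restrict an option to one netns.
 *
 *	SEC("cgroup/getsockopt")
 *	int per_netns(struct bpf_sockopt *ctx)
 *	{
 *		return bpf_get_netns_cookie(ctx) == EXPECTED_COOKIE;
 *	}
 */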
2232 
2233 static const struct bpf_func_proto *
2234 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2235 {
2236 	switch (func_id) {
2237 #ifdef CONFIG_NET
2238 	case BPF_FUNC_get_netns_cookie:
2239 		return &bpf_get_netns_cookie_sockopt_proto;
2240 	case BPF_FUNC_sk_storage_get:
2241 		return &bpf_sk_storage_get_proto;
2242 	case BPF_FUNC_sk_storage_delete:
2243 		return &bpf_sk_storage_delete_proto;
2244 	case BPF_FUNC_setsockopt:
2245 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2246 			return &bpf_sk_setsockopt_proto;
2247 		return NULL;
2248 	case BPF_FUNC_getsockopt:
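		/* Note: deliberately gated on the *setsockopt* attach
		 * type below, presumably so a getsockopt program cannot
		 * call getsockopt() and re-enter this very hook.
		 */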
2249 		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
2250 			return &bpf_sk_getsockopt_proto;
2251 		return NULL;
2252 #endif
2253 #ifdef CONFIG_INET
2254 	case BPF_FUNC_tcp_sock:
2255 		return &bpf_tcp_sock_proto;
2256 #endif
2257 	default:
2258 		return cgroup_base_func_proto(func_id, prog);
2259 	}
2260 }
2261 
2262 static bool cg_sockopt_is_valid_access(int off, int size,
2263 				       enum bpf_access_type type,
2264 				       const struct bpf_prog *prog,
2265 				       struct bpf_insn_access_aux *info)
2266 {
2267 	const int size_default = sizeof(__u32);
2268 
2269 	if (off < 0 || off >= sizeof(struct bpf_sockopt))
2270 		return false;
2271 
2272 	if (off % size != 0)
2273 		return false;
2274 
2275 	if (type == BPF_WRITE) {
2276 		switch (off) {
2277 		case offsetof(struct bpf_sockopt, retval):
2278 			if (size != size_default)
2279 				return false;
2280 			return prog->expected_attach_type ==
2281 				BPF_CGROUP_GETSOCKOPT;
2282 		case offsetof(struct bpf_sockopt, optname):
2283 			fallthrough;
2284 		case offsetof(struct bpf_sockopt, level):
2285 			if (size != size_default)
2286 				return false;
2287 			return prog->expected_attach_type ==
2288 				BPF_CGROUP_SETSOCKOPT;
2289 		case offsetof(struct bpf_sockopt, optlen):
2290 			return size == size_default;
2291 		default:
2292 			return false;
2293 		}
2294 	}
2295 
2296 	switch (off) {
2297 	case offsetof(struct bpf_sockopt, sk):
2298 		if (size != sizeof(__u64))
2299 			return false;
2300 		info->reg_type = PTR_TO_SOCKET;
2301 		break;
2302 	case offsetof(struct bpf_sockopt, optval):
2303 		if (size != sizeof(__u64))
2304 			return false;
2305 		info->reg_type = PTR_TO_PACKET;
2306 		break;
2307 	case offsetof(struct bpf_sockopt, optval_end):
2308 		if (size != sizeof(__u64))
2309 			return false;
2310 		info->reg_type = PTR_TO_PACKET_END;
2311 		break;
2312 	case offsetof(struct bpf_sockopt, retval):
2313 		if (size != size_default)
2314 			return false;
2315 		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
2316 	default:
2317 		if (size != size_default)
2318 			return false;
2319 		break;
2320 	}
2321 	return true;
2322 }
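
/* Because optval/optval_end are PTR_TO_PACKET{,_END} above, the
 * verifier demands explicit bounds checks before any access, just as
 * for skb data (illustrative):
 *
 *	if (ctx->optval + 1 > ctx->optval_end)
 *		return 0;		// out of bounds: reject
 *	((char *)ctx->optval)[0] = 0;	// now provably in bounds
 */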
2323 
2324 #define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
2325 	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
2326 	  si->dst_reg, si->src_reg,					\
2327 	  offsetof(struct bpf_sockopt_kern, F))
2328 
2329 static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2330 					 const struct bpf_insn *si,
2331 					 struct bpf_insn *insn_buf,
2332 					 struct bpf_prog *prog,
2333 					 u32 *target_size)
2334 {
2335 	struct bpf_insn *insn = insn_buf;
2336 
2337 	switch (si->off) {
2338 	case offsetof(struct bpf_sockopt, sk):
2339 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
2340 		break;
2341 	case offsetof(struct bpf_sockopt, level):
2342 		if (type == BPF_WRITE)
2343 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
2344 		else
2345 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
2346 		break;
2347 	case offsetof(struct bpf_sockopt, optname):
2348 		if (type == BPF_WRITE)
2349 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
2350 		else
2351 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
2352 		break;
2353 	case offsetof(struct bpf_sockopt, optlen):
2354 		if (type == BPF_WRITE)
2355 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
2356 		else
2357 			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
2358 		break;
2359 	case offsetof(struct bpf_sockopt, retval):
2360 		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2361 
2362 		if (type == BPF_WRITE) {
2363 			int treg = BPF_REG_9;
2364 
2365 			if (si->src_reg == treg || si->dst_reg == treg)
2366 				--treg;
2367 			if (si->src_reg == treg || si->dst_reg == treg)
2368 				--treg;
2369 			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2370 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2371 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2372 					      treg, si->dst_reg,
2373 					      offsetof(struct bpf_sockopt_kern, current_task));
2374 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2375 					      treg, treg,
2376 					      offsetof(struct task_struct, bpf_ctx));
2377 			*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2378 					      treg, si->src_reg,
2379 					      offsetof(struct bpf_cg_run_ctx, retval));
2380 			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2381 					      offsetof(struct bpf_sockopt_kern, tmp_reg));
2382 		} else {
2383 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2384 					      si->dst_reg, si->src_reg,
2385 					      offsetof(struct bpf_sockopt_kern, current_task));
2386 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2387 					      si->dst_reg, si->dst_reg,
2388 					      offsetof(struct task_struct, bpf_ctx));
2389 			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2390 					      si->dst_reg, si->dst_reg,
2391 					      offsetof(struct bpf_cg_run_ctx, retval));
2392 		}
2393 		break;
2394 	case offsetof(struct bpf_sockopt, optval):
2395 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
2396 		break;
2397 	case offsetof(struct bpf_sockopt, optval_end):
2398 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
2399 		break;
2400 	}
2401 
2402 	return insn - insn_buf;
2403 }
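
/* Rough C equivalent of the retval lowering above (illustrative):
 *
 *	struct bpf_cg_run_ctx *rctx =
 *		container_of(ctx->current_task->bpf_ctx,
 *			     struct bpf_cg_run_ctx, run_ctx);
 *	val = rctx->retval;		// BPF_READ
 *	rctx->retval = val;		// BPF_WRITE (getsockopt only)
 *
 * The BUILD_BUG_ON above guarantees run_ctx sits at offset 0, so the
 * emitted loads skip the container_of() adjustment entirely.
 */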
2404 
2405 static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2406 				   bool direct_write,
2407 				   const struct bpf_prog *prog)
2408 {
2409 	/* Nothing to do for the sockopt argument. The data is kzalloc'ed.
2410 	 */
2411 	return 0;
2412 }
2413 
2414 const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2415 	.get_func_proto		= cg_sockopt_func_proto,
2416 	.is_valid_access	= cg_sockopt_is_valid_access,
2417 	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
2418 	.gen_prologue		= cg_sockopt_get_prologue,
2419 };
2420 
2421 const struct bpf_prog_ops cg_sockopt_prog_ops = {
2422 };
2423