// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}

/* Called when bpf_cgroup_link is auto-detached from a dying cgroup.
 * It drops the cgroup and bpf_prog refcounts, and marks the bpf_link as
 * defunct. It doesn't free link memory, which will eventually be done by
 * bpf_link's release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get the underlying bpf_prog of a bpf_prog_list entry, regardless of
 * whether it's attached through a link or directly.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count the number of elements in the list;
 * it's slow, but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if a parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if a parent has an overridable or multi-prog attachment, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
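
/* For illustration, consider a hypothetical hierarchy (names made up):
 *
 *	/sys/fs/cgroup/A	prog P_A attached with no flags
 *	/sys/fs/cgroup/A/B	attach attempted here
 *
 * Attaching to B is rejected (-EPERM): P_A is neither overridable nor
 * multi-prog. Had P_A been attached with BPF_F_ALLOW_OVERRIDE, a single
 * program could be attached to B and would override P_A for B's subtree;
 * with BPF_F_ALLOW_MULTI, any number of programs may be attached below A
 * and all of them run.
 */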

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		/* "continue" still advances to the parent: it jumps to the
		 * do-while condition, which walks up the hierarchy.
		 */
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
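
/* A worked example (hypothetical cgroups and programs): if A has P1 and P2
 * attached with BPF_F_ALLOW_MULTI, and its child B has P3 attached with
 * BPF_F_ALLOW_MULTI, the effective array computed for B is [P3, P1, P2].
 * B's own programs are placed first, followed by each ancestor's, in the
 * order the loops above visit them; within one cgroup the list order is
 * attach order (list_add_tail in __cgroup_bpf_attach).
 */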

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might still be walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use a macro instead of a const int, since the compiler thinks
 * the array below is variable-length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* OOM while computing effective progs. Free all computed effective
	 * arrays since they were not activated.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @link: A link to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none.
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
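
/* A minimal user-space sketch of reaching this path via the bpf(2) syscall
 * (illustrative only; cgroup_fd and prog_fd are assumed to be valid FDs
 * obtained elsewhere):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)))
 *		perror("BPF_PROG_ATTACH");
 *
 * Passing BPF_F_ALLOW_MULTI | BPF_F_REPLACE together with a valid
 * attr.replace_bpf_fd swaps out one specific program in the multi-prog list.
 */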

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* To maintain backward compatibility, NONE and OVERRIDE cgroups
		 * allow detaching with an invalid FD (prog==NULL) in legacy mode.
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach a MULTI prog the user has to specify a valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now we can actually delete it from this cgroup's list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}
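
/* The matching user-space sketch (illustrative; FDs assumed valid). For a
 * multi-prog attachment the exact program must be named; legacy single-prog
 * mode tolerates an invalid attach_bpf_fd, per find_detach_entry() above:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	if (syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr)))
 *		perror("BPF_PROG_DETACH");
 */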

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
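
/* Query sketch (illustrative; cgroup_fd assumed valid): a short ID buffer
 * is still filled, but the call returns -ENOSPC, so callers typically read
 * the count first and then size the buffer:
 *
 *	__u32 ids[64];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *	if (syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr)) == 0)
 *		printf("%u programs\n", attr.query.prog_cnt);
 */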

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by the dying cgroup already;
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_cgroup_link *link;
	struct file *link_file;
	struct cgroup *cgrp;
	int err, link_fd;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	link_file = bpf_link_new_file(&link->link, &link_fd);
	if (IS_ERR(link_file)) {
		kfree(link);
		err = PTR_ERR(link_file);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link->link, link_file, link_fd);
		goto out_put_cgroup;
	}

	fd_install(link_fd, link_file);
	return link_fd;

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}
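
/* Link-based attachment sketch (illustrative; FDs assumed valid). Unlike
 * BPF_PROG_ATTACH, the attachment lives only as long as the returned link
 * FD (or a pin of it) is held:
 *
 *	union bpf_attr attr = {};
 *	int link_fd;
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *	if (link_fd < 0)
 *		perror("BPF_LINK_CREATE");
 */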

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					  __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
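
/* A minimal BPF-side counterpart (illustrative), as it might appear in a
 * program loaded with libbpf; returning 1 allows the packet, 0 drops it:
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;
 *	}
 */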

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user space
 * @sk: sock struct that will use the sockaddr
 * @uaddr: sockaddr struct provided by user space
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach-type-specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned a value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check the socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.). May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
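
/* Illustrative BPF-side device filter matching the ctx layout built above:
 * access_type packs the access bits in the upper 16 bits and the device
 * type (BPF_DEVCG_DEV_CHAR/BLOCK) in the lower 16. This sketch permits
 * only /dev/null (char 1:3):
 *
 *	SEC("cgroup/dev")
 *	int dev_filter(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		if ((ctx->access_type & 0xFFFF) != BPF_DEVCG_DEV_CHAR)
 *			return 0;
 *		return ctx->major == 1 && ctx->minor == 3;
 *	}
 */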

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);
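
/* Illustrative BPF-side sysctl hook: returning 1 allows the access, 0
 * denies it with -EPERM. Programs can inspect the name and values via the
 * bpf_sysctl_get_name()/bpf_sysctl_get_current_value() helpers implemented
 * later in this file; the trivial sketch below makes all sysctls read-only
 * for tasks in the cgroup:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_ro(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;
 *	}
 */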

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);
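
/* Illustrative BPF-side setsockopt hook: returning 1 lets the kernel
 * handler run on the (possibly modified) value, returning 0 rejects the
 * call with -EPERM, and setting ctx->optlen to -1 bypasses the kernel
 * handler entirely. This sketch blocks IP_TOS changes:
 *
 *	SEC("cgroup/setsockopt")
 *	int block_tos(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
 *			return 0;
 *		return 1;
 *	}
 */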

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs are only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
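
/* The getsockopt counterpart (illustrative) runs after the kernel handler
 * and sees the value the kernel produced; it may rewrite ctx->optval or
 * clear ctx->retval to 0. Here the result is passed through untouched:
 *
 *	SEC("cgroup/getsockopt")
 *	int inspect_only(struct bpf_sockopt *ctx)
 *	{
 *		return 1;
 *	}
 */
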
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also, for stores an additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for the sockopt argument. The data is kzalloc'ed. */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};