Lines matching refs:bpf — uses of the bpf member (struct cgroup_bpf) of struct cgroup in kernel/bpf/cgroup.c. The leading numbers are line numbers in that file; the trailing "in fn()" names the enclosing function.
99 ret = bpf_prog_run_array_cg(&cgrp->bpf, in __cgroup_bpf_run_lsm_sock()
121 ret = bpf_prog_run_array_cg(&cgrp->bpf, in __cgroup_bpf_run_lsm_socket()
140 ret = bpf_prog_run_array_cg(&cgrp->bpf, in __cgroup_bpf_run_lsm_current()
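The three LSM entry points above (lines 99-140) all funnel into bpf_prog_run_array_cg, which walks the cgroup's effective program array and folds the programs' return codes into a single allow/deny decision. The standalone C sketch below models only that walk; prog_array, prog_run_t, and run_array_cg are illustrative stand-ins, not the kernel's types or its exact return-code handling.

#include <stddef.h>

/* Illustrative stand-ins for struct bpf_prog and struct bpf_prog_array. */
typedef int (*prog_run_t)(void *ctx);

struct prog_array {
	size_t cnt;
	prog_run_t progs[];	/* flexible array of attached programs */
};

/* Run every program in the array; any program returning 0 denies.
 * This mirrors the "all programs must allow" policy, not the kernel's
 * precise error-code mapping.
 */
static int run_array_cg(const struct prog_array *array, void *ctx)
{
	int ret = 1;	/* default: allow */

	for (size_t i = 0; i < array->cnt; i++)
		ret &= array->progs[i](ctx);

	return ret ? 0 : -1;	/* 0 on allow, -EPERM-like on deny */
}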
212 percpu_ref_kill(&cgrp->bpf.refcnt); in cgroup_bpf_offline()
295 bpf.release_work); in cgroup_bpf_release()
297 struct list_head *storages = &cgrp->bpf.storages; in cgroup_bpf_release()
304 for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) { in cgroup_bpf_release()
305 struct hlist_head *progs = &cgrp->bpf.progs[atype]; in cgroup_bpf_release()
325 cgrp->bpf.effective[atype], in cgroup_bpf_release()
340 percpu_ref_exit(&cgrp->bpf.refcnt); in cgroup_bpf_release()
351 struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt); in cgroup_bpf_release_fn()
353 INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release); in cgroup_bpf_release_fn()
354 queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work); in cgroup_bpf_release_fn()
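Lines 212 through 354 are the teardown path: cgroup_bpf_offline() kills the percpu refcount, and when it drops to zero cgroup_bpf_release_fn() runs, which only queues cgroup_bpf_release() on cgroup_bpf_destroy_wq. The split presumably exists because the percpu_ref release callback runs in a context that cannot sleep, while freeing the program lists and effective arrays can. A simplified userspace model of that kill/defer/free shape (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct cgroup_bpf_model {
	atomic_int refcnt;	/* stand-in for struct percpu_ref */
};

static void (*pending_work)(struct cgroup_bpf_model *);

static void release_work(struct cgroup_bpf_model *bpf)
{
	/* process context: safe to free prog lists and effective arrays */
	printf("release: freeing attached and effective programs\n");
}

static void release_fn(struct cgroup_bpf_model *bpf)
{
	/* atomic context in the kernel: just schedule the real work */
	pending_work = release_work;
}

static void put(struct cgroup_bpf_model *bpf)
{
	if (atomic_fetch_sub(&bpf->refcnt, 1) == 1)
		release_fn(bpf);
}

int main(void)
{
	struct cgroup_bpf_model c = { .refcnt = 1 };

	put(&c);		/* models percpu_ref_kill() in offline */
	if (pending_work)
		pending_work(&c);	/* models the workqueue running later */
	return 0;
}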
398 u32 flags = p->bpf.flags[atype]; in hierarchy_allows_attach()
403 cnt = prog_list_length(&p->bpf.progs[atype]); in hierarchy_allows_attach()
430 if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) in compute_effective_progs()
431 cnt += prog_list_length(&p->bpf.progs[atype]); in compute_effective_progs()
443 if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) in compute_effective_progs()
446 hlist_for_each_entry(pl, &p->bpf.progs[atype], node) { in compute_effective_progs()
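hierarchy_allows_attach() and compute_effective_progs() both walk the ancestor chain, and lines 430-431 show the accumulation rule: a level's attached programs count toward the effective set only while nothing has been counted yet or that level was attached with BPF_F_ALLOW_MULTI. A standalone sketch of that counting pass, with simplified types standing in for the kernel's hlists of struct bpf_prog_list:

#include <stddef.h>

#define ALLOW_MULTI 0x2	/* stand-in for BPF_F_ALLOW_MULTI */

struct cg {
	struct cg *parent;
	unsigned int flags;	/* per-attach-type flags in the kernel */
	size_t prog_cnt;	/* stand-in for prog_list_length(&progs) */
};

/* Count how many programs would land in @cg's effective array: walk
 * toward the root, adding a level's programs only if nothing has been
 * counted yet or that level allows multi-attach.
 */
static size_t count_effective(const struct cg *cg)
{
	size_t cnt = 0;

	for (const struct cg *p = cg; p; p = p->parent)
		if (cnt == 0 || (p->flags & ALLOW_MULTI))
			cnt += p->prog_cnt;

	return cnt;
}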
466 old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array, in activate_effective_progs()
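activate_effective_progs() then publishes the freshly built array with rcu_replace_pointer() and receives the previous array back so it can be freed once readers are done. A loose C11 analogue of the pointer swap (no RCU grace period here, and none of the lockdep checking the kernel performs on writers):

#include <stdatomic.h>

struct prog_array;

/* Publish @new_arr and hand back the old array for deferred freeing. */
static struct prog_array *
replace_effective(_Atomic(struct prog_array *) *slot,
		  struct prog_array *new_arr)
{
	return atomic_exchange(slot, new_arr);
}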
483 #define NR ARRAY_SIZE(cgrp->bpf.effective) in cgroup_bpf_inherit()
488 ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, in cgroup_bpf_inherit()
497 INIT_HLIST_HEAD(&cgrp->bpf.progs[i]); in cgroup_bpf_inherit()
499 INIT_LIST_HEAD(&cgrp->bpf.storages); in cgroup_bpf_inherit()
516 percpu_ref_exit(&cgrp->bpf.refcnt); in cgroup_bpf_inherit()
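cgroup_bpf_inherit() is the mirror image of the release path: initialize the refcount, the per-attach-type program lists, and the storage list, then unwind with percpu_ref_exit() (line 516) if building the effective arrays fails. A sketch of that init-or-unwind shape, with illustrative stand-ins for the kernel types:

#include <stddef.h>

#define NTYPES 4	/* stand-in for ARRAY_SIZE(cgrp->bpf.effective) */

struct list { struct list *next; };

struct cg_bpf {
	long refcnt;			/* percpu_ref stand-in */
	struct list *progs[NTYPES];	/* per-attach-type program lists */
	struct list *storages;		/* cgroup local-storage list */
};

static int compute_effective(struct cg_bpf *bpf) { return 0; }

static int bpf_inherit(struct cg_bpf *bpf)
{
	bpf->refcnt = 1;		/* percpu_ref_init() */

	for (int i = 0; i < NTYPES; i++)
		bpf->progs[i] = NULL;	/* INIT_HLIST_HEAD() */
	bpf->storages = NULL;		/* INIT_LIST_HEAD() */

	if (compute_effective(bpf)) {	/* real code builds the arrays */
		bpf->refcnt = 0;	/* percpu_ref_exit() unwind */
		return -1;
	}
	return 0;
}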
531 if (percpu_ref_is_zero(&desc->bpf.refcnt)) in update_effective_progs()
534 err = compute_effective_progs(desc, atype, &desc->bpf.inactive); in update_effective_progs()
543 if (percpu_ref_is_zero(&desc->bpf.refcnt)) { in update_effective_progs()
544 if (unlikely(desc->bpf.inactive)) { in update_effective_progs()
545 bpf_prog_array_free(desc->bpf.inactive); in update_effective_progs()
546 desc->bpf.inactive = NULL; in update_effective_progs()
551 activate_effective_progs(desc, atype, desc->bpf.inactive); in update_effective_progs()
552 desc->bpf.inactive = NULL; in update_effective_progs()
564 bpf_prog_array_free(desc->bpf.inactive); in update_effective_progs()
565 desc->bpf.inactive = NULL; in update_effective_progs()
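update_effective_progs() is a two-pass, stage-then-commit update over the descendant subtree: pass one computes every descendant's replacement array into bpf.inactive, skipping cgroups whose refcount is already zero; pass two either activates the staged array or, for cgroups that died between the passes (lines 543-546), frees it; the error path (lines 564-565) discards everything staged. A standalone model of that pattern (simplified types; assumes the node array is zero-initialized):

#include <stdbool.h>
#include <stdlib.h>

struct node {
	bool dead;		/* percpu_ref_is_zero() analogue */
	int *effective;		/* live array */
	int *inactive;		/* staged replacement */
};

static int update_all(struct node *nodes, size_t n)
{
	/* pass 1: stage a replacement for every live node */
	for (size_t i = 0; i < n; i++) {
		if (nodes[i].dead)
			continue;
		nodes[i].inactive = malloc(sizeof(int));
		if (!nodes[i].inactive)
			goto cleanup;
	}

	/* pass 2: commit, or discard for nodes that died in between */
	for (size_t i = 0; i < n; i++) {
		if (nodes[i].dead) {
			free(nodes[i].inactive);
			nodes[i].inactive = NULL;
			continue;
		}
		free(nodes[i].effective);
		nodes[i].effective = nodes[i].inactive;
		nodes[i].inactive = NULL;
	}
	return 0;

cleanup:
	for (size_t i = 0; i < n; i++) {
		free(nodes[i].inactive);
		nodes[i].inactive = NULL;
	}
	return -1;
}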
654 progs = &cgrp->bpf.progs[atype]; in __cgroup_bpf_attach()
659 if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags) in __cgroup_bpf_attach()
702 cgrp->bpf.flags[atype] = saved_flags; in __cgroup_bpf_attach()
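In __cgroup_bpf_attach(), line 659 enforces flag consistency: once something is attached for an attach type, later attaches must present the same flags, so BPF_F_ALLOW_MULTI and non-multi attachments cannot be mixed, and line 702 records the flags. From userspace those flags arrive through the attach call; a minimal libbpf example, assuming prog_fd and cgroup_fd are already valid descriptors:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Attach a cgroup program in multi mode; further programs can then be
 * attached to the same cgroup and attach type with the same flag.
 */
int attach_multi(int prog_fd, int cgroup_fd)
{
	return bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
			       BPF_F_ALLOW_MULTI);
}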
773 if (percpu_ref_is_zero(&desc->bpf.refcnt)) in replace_effective_prog()
778 if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) in replace_effective_prog()
781 head = &cg->bpf.progs[atype]; in replace_effective_prog()
793 desc->bpf.effective[atype], in replace_effective_prog()
824 progs = &cgrp->bpf.progs[atype]; in __cgroup_bpf_replace()
925 if (percpu_ref_is_zero(&desc->bpf.refcnt)) in purge_effective_progs()
930 if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) in purge_effective_progs()
933 head = &cg->bpf.progs[atype]; in purge_effective_progs()
947 desc->bpf.effective[atype], in purge_effective_progs()
986 progs = &cgrp->bpf.progs[atype]; in __cgroup_bpf_detach()
987 flags = cgrp->bpf.flags[atype]; in __cgroup_bpf_detach()
1015 cgrp->bpf.flags[atype] = 0; in __cgroup_bpf_detach()
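__cgroup_bpf_detach() reads the recorded flags (line 987) to locate the right program and, once the list for that attach type is empty, resets the per-type flags to zero (line 1015). The userspace counterpart, again assuming valid descriptors:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Detach one program from the cgroup; with BPF_F_ALLOW_MULTI
 * attachments the prog_fd identifies which program to remove.
 */
int detach_one(int prog_fd, int cgroup_fd)
{
	return bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS);
}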
1067 flags = cgrp->bpf.flags[from_atype]; in __cgroup_bpf_query()
1072 effective = rcu_dereference_protected(cgrp->bpf.effective[atype], in __cgroup_bpf_query()
1076 total_cnt += prog_list_length(&cgrp->bpf.progs[atype]); in __cgroup_bpf_query()
1097 effective = rcu_dereference_protected(cgrp->bpf.effective[atype], in __cgroup_bpf_query()
1107 progs = &cgrp->bpf.progs[atype]; in __cgroup_bpf_query()
1120 flags = cgrp->bpf.flags[atype]; in __cgroup_bpf_query()
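__cgroup_bpf_query() backs the BPF_PROG_QUERY command: it reports either the RCU-protected effective arrays (lines 1072 and 1097) or the directly attached lists (line 1107), together with the per-type flags. A hedged libbpf counterpart querying effective program ids:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Query up to 64 effective program ids for one cgroup attach type. */
int query_effective(int cgroup_fd)
{
	__u32 ids[64];
	__u32 cnt = 64;
	__u32 attach_flags = 0;

	return bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_INGRESS,
			      BPF_F_QUERY_EFFECTIVE, &attach_flags,
			      ids, &cnt);
}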
1402 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb, in __cgroup_bpf_run_filter_skb()
1428 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, in __cgroup_bpf_run_filter_skb()
1460 return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0, in __cgroup_bpf_run_filter_sk()
1514 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, in __cgroup_bpf_run_filter_sock_addr()
1546 return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run, in __cgroup_bpf_run_filter_sock_ops()
1564 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0, in __cgroup_bpf_check_dev_permission()
1755 ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0, in __cgroup_bpf_run_filter_sysctl()
1849 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT, in __cgroup_bpf_run_filter_setsockopt()
1958 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, in __cgroup_bpf_run_filter_getsockopt()
2018 ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, in __cgroup_bpf_run_filter_getsockopt_kern()
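Everything from line 1402 on is a run site: each hook resolves the relevant cgroup and hands &cgrp->bpf plus an attach type to bpf_prog_run_array_cg. What those sites execute are ordinary cgroup programs; a minimal cgroup_skb example of the kind run by __cgroup_bpf_run_filter_skb(), where returning 1 allows the packet:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Minimal ingress filter: allow every packet. */
SEC("cgroup_skb/ingress")
int allow_all(struct __sk_buff *skb)
{
	return 1;
}

char _license[] SEC("license") = "GPL";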