Matched lines for the identifier "im" in kernel/bpf/trampoline.c (cleaned excerpt; "..." marks lines elided by the search)
// SPDX-License-Identifier: GPL-2.0-only
/* in bpf_tramp_ftrace_ops_func() */
	struct bpf_trampoline *tr = ops->private;
	...
	/* ... tr->mutex is already locked. ... */
	lockdep_assert_held_once(&tr->mutex);
	...
	/* ... -EAGAIN to register_ftrace_direct(). Then we can ... */
	if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
	    !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
		if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
			return -EBUSY;

		tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
		return -EAGAIN;
	}
	...
	/* ... tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c) ... */
	...
	/* ... mutex_trylock(&tr->mutex) to avoid deadlock in race condition ... */
	if (!mutex_trylock(&tr->mutex)) {
		/* sleep 1 ms to make sure whatever holding tr->mutex makes ... */
		...
		return -EAGAIN;
	}
	...
	tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
	...
	if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
	    !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
		...
	tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;
	...
	if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
		...
	ret = -EINVAL;
	...
	mutex_unlock(&tr->mutex);
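The trylock-and-retry dance above exists to avoid a lock-order inversion between tr->mutex and ftrace's own locks: this callback can be reached with ftrace's direct_mutex already held, so taking tr->mutex unconditionally could deadlock. Below is a minimal userspace analog of the same pattern, using pthreads; ops_func(), attach(), and the 1 ms sleep are illustrative stand-ins for the kernel's callback, register path, and msleep(1), not kernel API.

	#include <errno.h>
	#include <pthread.h>
	#include <unistd.h>

	static pthread_mutex_t tr_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* May be called with another subsystem's lock already held, so
	 * blocking on tr_mutex here could deadlock against a thread that
	 * takes the two locks in the opposite order.
	 */
	static int ops_func(void)
	{
		if (pthread_mutex_trylock(&tr_mutex)) {
			usleep(1000);	/* let the current holder make progress */
			return -EAGAIN;	/* tell the caller to retry */
		}
		/* ... update state protected by tr_mutex ... */
		pthread_mutex_unlock(&tr_mutex);
		return 0;
	}

	/* Caller side: retry while the callback reports contention. */
	static int attach(void)
	{
		int err;

		do {
			err = ops_func();
		} while (err == -EAGAIN);
		return err;
	}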
/* in bpf_prog_has_trampoline() */
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

/* in bpf_image_ksym_add() */
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	...
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);

/* in bpf_image_ksym_del() */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
/* in bpf_trampoline_lookup() */
	if (tr->key == key) {
		refcount_inc(&tr->refcnt);
		...
	}
	...
	tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
	if (!tr->fops) {
		...
	}
	tr->fops->private = tr;
	tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
	...
	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	...
	INIT_HLIST_HEAD(&tr->progs_hlist[i]);
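bpf_trampoline_lookup() is a classic lookup-or-create over a hash bucket, serialized in the kernel by a global mutex: bump the refcount on a hit, otherwise allocate, initialize with refcount 1, and insert. A compact userspace sketch of the same pattern, assuming a plain singly-linked list and an int refcount instead of the kernel's hlist and refcount_t:

	#include <stdint.h>
	#include <stdlib.h>

	struct tramp {
		uint64_t key;
		int refcnt;
		struct tramp *next;
	};

	static struct tramp *head;

	/* Return the entry for @key, creating it on first use. */
	static struct tramp *tramp_lookup(uint64_t key)
	{
		struct tramp *tr;

		for (tr = head; tr; tr = tr->next) {
			if (tr->key == key) {
				tr->refcnt++;	/* existing: take a reference */
				return tr;
			}
		}
		tr = calloc(1, sizeof(*tr));
		if (!tr)
			return NULL;
		tr->key = key;
		tr->refcnt = 1;		/* new: caller holds the first ref */
		tr->next = head;
		head = tr;
		return tr;
	}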
/* in unregister_fentry() */
	void *ip = tr->func.addr;
	...
	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
/* in modify_fentry() */
	void *ip = tr->func.addr;
	...
	if (tr->func.ftrace_managed) {
		if (lock_direct_mutex)
			ret = modify_ftrace_direct(tr->fops, (long)new_addr);
		else
			ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
	}
/* in register_fentry() */
	void *ip = tr->func.addr;
	...
		if (!tr->fops)
			return -ENOTSUPP;
		tr->func.ftrace_managed = true;
	...
	if (tr->func.ftrace_managed) {
		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
		ret = register_ftrace_direct(tr->fops, (long)new_addr);
	}
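Pieced together, the ftrace-managed branch first restricts tr->fops to the single target ip, then installs the trampoline as an ftrace direct call; functions not managed by ftrace fall back to patching the call site directly. An approximate reconstruction as a reading aid (details such as the module-ref handling vary across kernel versions; this is not verbatim source):

	static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
	{
		void *ip = tr->func.addr;
		int ret;

		/* A target already managed by ftrace must go through the
		 * direct-call API; without tr->fops that is impossible.
		 */
		if (ftrace_location((unsigned long)ip)) {
			if (!tr->fops)
				return -ENOTSUPP;
			tr->func.ftrace_managed = true;
		}

		if (tr->func.ftrace_managed) {
			ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
			ret = register_ftrace_direct(tr->fops, (long)new_addr);
		} else {
			ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
		}
		return ret;
	}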
/* in bpf_trampoline_get_progs() */
		return ERR_PTR(-ENOMEM);
	...
	tlinks[kind].nr_links = tr->progs_cnt[kind];
	*total += tr->progs_cnt[kind];
	...
	hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
		*ip_arg |= link->link.prog->call_get_func_ip;
		...
	}
static void bpf_tramp_image_free(struct bpf_tramp_image *im)
{
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	...
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* in __bpf_tramp_image_put_deferred() */
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_tramp_image_free(im);

/* in __bpf_tramp_image_put_rcu() */
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);

/* in __bpf_tramp_image_release() */
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
/* in __bpf_tramp_image_put_rcu_tasks() */
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		...
		percpu_ref_kill(&im->pcref);
	else
		...
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	...
	/* ... __bpf_tramp_enter->percpu_ref_get. ... */
	...
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		...
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		...
			percpu_ref_kill(&im->pcref);
		...
	}
	...
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}
/* in bpf_tramp_image_alloc() */
	struct bpf_tramp_image *im;
	...
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		...
	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
	...
	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	...
	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
	...
	return im;
	...
	bpf_jit_free_exec(im->image);
	...
	kfree(im);
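bpf_tramp_image_alloc() follows the kernel's standard goto-based unwind: each successful allocation gains an error label that frees it, and failures jump to the label matching how far setup got, releasing resources in reverse order. A minimal userspace sketch of the same shape; the image/page/ref fields are illustrative stand-ins for the kernel's im, bpf_jit_alloc_exec(), and percpu_ref_init():

	#include <stdlib.h>

	struct image {
		void *page;
		long *ref;
	};

	static struct image *image_alloc(void)
	{
		struct image *im;

		im = calloc(1, sizeof(*im));
		if (!im)
			goto out;

		im->page = malloc(4096);	/* analog of bpf_jit_alloc_exec() */
		if (!im->page)
			goto out_free_im;

		im->ref = calloc(1, sizeof(*im->ref));	/* analog of percpu_ref_init() */
		if (!im->ref)
			goto out_free_page;

		return im;

	out_free_page:
		free(im->page);		/* unwind in reverse allocation order */
	out_free_im:
		free(im);
	out:
		return NULL;
	}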
/* in bpf_trampoline_update() */
	struct bpf_tramp_image *im;
	...
	u32 orig_flags = tr->flags;
	...
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		...
	im = bpf_tramp_image_alloc(tr->key);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		...
	}
	...
	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
	...
		tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
	...
		tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
	...
		tr->flags |= BPF_TRAMP_F_IP_ARG;
	...
	if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
	    (tr->flags & BPF_TRAMP_F_CALL_ORIG))
		tr->flags |= BPF_TRAMP_F_ORIG_STACK;
	...
	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, tr->flags, tlinks,
					  tr->func.addr);
	...
	set_memory_rox((long)im->image, 1);

	WARN_ON(tr->cur_image && total == 0);
	if (tr->cur_image)
		...
		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
	...
		err = register_fentry(tr, im->image);
	...
	if (err == -EAGAIN) {
		/* -EAGAIN from bpf_tramp_ftrace_ops_func. Now ... */
		...
		/* reset fops->func and fops->trampoline for re-register */
		tr->fops->func = NULL;
		tr->fops->trampoline = 0;

		/* reset im->image memory attr for arch_prepare_bpf_trampoline */
		set_memory_nx((long)im->image, 1);
		set_memory_rw((long)im->image, 1);
		...
	}
	...
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	...
	tr->flags = orig_flags;
	...
	bpf_tramp_image_free(im);
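Assembled from the fragments above, the core of bpf_trampoline_update() is a generate/protect/install sequence with one retry path for the case where ftrace flips BPF_TRAMP_F_SHARE_IPMODIFY mid-registration (the -EAGAIN from bpf_tramp_ftrace_ops_func). A condensed sketch using only calls visible above; locals, labels, and error handling are elided or simplified:

	again:
		err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
						  &tr->func.model, tr->flags, tlinks,
						  tr->func.addr);
		if (err < 0)
			return err;

		set_memory_rox((long)im->image, 1);	/* image becomes read-only + executable */

		if (tr->cur_image)
			err = modify_fentry(tr, tr->cur_image->image, im->image,
					    lock_direct_mutex);
		else
			err = register_fentry(tr, im->image);

		if (err == -EAGAIN) {
			/* ftrace changed the sharing requirements under us:
			 * reset the ops, make the image writable again, and
			 * regenerate it with the updated flags.
			 */
			tr->fops->func = NULL;
			tr->fops->trampoline = 0;
			set_memory_nx((long)im->image, 1);
			set_memory_rw((long)im->image, 1);
			goto again;
		}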
/* in bpf_attach_type_to_tramp() */
	switch (prog->expected_attach_type) {
	...
		if (!prog->aux->attach_func_proto->type)
/* in __bpf_trampoline_link_prog() */
	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (tr->extension_prog)
		...
		return -EBUSY;
	...
		cnt += tr->progs_cnt[i];
	...
			return -EBUSY;
		tr->extension_prog = link->link.prog;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	...
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		...
		return -EBUSY;
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		...
		return -EBUSY;
	}
	...
	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	...
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
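The linking rules visible above boil down to: an extension (BPF_TRAMP_REPLACE) program excludes, and is excluded by, any fentry/fexit program; ordinary programs are bounded by a per-trampoline limit and deduplicated; and the list insert is rolled back if regenerating the trampoline fails. A condensed sketch of that control flow, with locals elided and the duplicate-scan loop dropped (not verbatim source; the limit macro's name has varied across kernel versions):

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (tr->extension_prog)
		return -EBUSY;		/* extension excludes fentry/fexit */

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		if (cnt)
			return -EBUSY;	/* fentry/fexit exclude extension */
		tr->extension_prog = link->link.prog;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	}

	if (cnt >= BPF_MAX_TRAMP_LINKS)
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		return -EBUSY;		/* this link is already attached */

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
	if (err) {
		/* roll back the insert so a failed update leaves no trace */
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
	return err;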
/* in bpf_trampoline_link_prog() */
	mutex_lock(&tr->mutex);
	...
	mutex_unlock(&tr->mutex);

/* in __bpf_trampoline_unlink_prog() */
	kind = bpf_attach_type_to_tramp(link->link.prog);
	...
	WARN_ON_ONCE(!tr->extension_prog);
	err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
				 tr->extension_prog->bpf_func, NULL);
	tr->extension_prog = NULL;
	...
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;

/* in bpf_trampoline_unlink_prog() */
	mutex_lock(&tr->mutex);
	...
	mutex_unlock(&tr->mutex);

/* in bpf_shim_tramp_link_release() */
	/* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
	if (!shim_link->trampoline)
		...
	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
	bpf_trampoline_put(shim_link->trampoline);
/* in cgroup_shim_alloc() */
	p->jited = false;
	p->bpf_func = bpf_func;
	...
	p->aux->cgroup_atype = cgroup_atype;
	p->aux->attach_func_proto = prog->aux->attach_func_proto;
	p->aux->attach_btf_id = prog->aux->attach_btf_id;
	p->aux->attach_btf = prog->aux->attach_btf;
	btf_get(p->aux->attach_btf);
	p->type = BPF_PROG_TYPE_LSM;
	p->expected_attach_type = BPF_LSM_MAC;
	...
	bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
		      ...);
	bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

/* in cgroup_shim_find() */
	hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
		struct bpf_prog *p = link->link.prog;
		...
		if (p->bpf_func == bpf_func)
			...
	}
/* in bpf_trampoline_link_cgroup_shim() */
	...
			prog->aux->attach_btf_id,
	...
	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);
	...
		return -ENOMEM;
	...
	mutex_lock(&tr->mutex);
	...
		bpf_link_inc(&shim_link->link.link);
		...
		mutex_unlock(&tr->mutex);
	...
		err = -ENOMEM;
		...
	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
	...
	shim_link->trampoline = tr;
	...
	mutex_unlock(&tr->mutex);
	...
	mutex_unlock(&tr->mutex);
	...
	bpf_link_put(&shim_link->link.link);

/* in bpf_trampoline_unlink_cgroup_shim() */
	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);
	...
	mutex_lock(&tr->mutex);
	...
	mutex_unlock(&tr->mutex);
	...
	bpf_link_put(&shim_link->link.link);
/* in bpf_trampoline_get() */
	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		...
	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
	...
	mutex_unlock(&tr->mutex);

/* in bpf_trampoline_put() */
	if (!refcount_dec_and_test(&tr->refcnt))
		...
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	...
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
		...
	/* ... fexit progs. The fentry-only trampoline will be freed via ... */
	...
	hlist_del(&tr->hlist);
	if (tr->fops) {
		ftrace_free_filter(tr->fops);
		kfree(tr->fops);
	}
/* doc comment above the __bpf_prog_enter/exit helpers:
 * ...
 *	call prog->bpf_func
 * ...
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 * ...
 */
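In other words, the architecture-generated trampoline brackets each program with an enter/exit pair and uses the enter helper's return value both as a run/skip decision and as the start timestamp for runtime stats. Roughly, in C rather than the emitted assembly (enter_fn/exit_fn are placeholder names for whichever helper pair the JIT wires in):

	struct bpf_tramp_run_ctx run_ctx;
	u64 start;

	start = enter_fn(prog, &run_ctx);	/* e.g. __bpf_prog_enter_recur() */
	if (start)				/* 0 means: skip the prog body */
		prog->bpf_func(ctx, prog->insnsi);
	exit_fn(prog, start, &run_ctx);		/* start >= 2 doubles as the timestamp */

Note that exit_fn runs even when start is 0, so any state the enter helper touched (run context, recursion counter) is always unwound.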
/* in __bpf_prog_enter_recur() */
	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
	...
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		...
	}

/* in update_prog_stats() */
	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->cnt);
	u64_stats_add(&stats->nsecs, sched_clock() - start);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);

/* in __bpf_prog_exit_recur() */
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
	...
	this_cpu_dec(*(prog->active));
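The recur variants pair a per-CPU prog->active counter with the timestamp contract: if the counter is already nonzero, the enter helper returns 0 and the body is skipped, while the exit helper always drops the counter and, given a nonzero start time, records the runtime the way update_prog_stats() does. A userspace analog with a per-thread counter; prog_enter(), prog_exit(), and now_ns() are invented names for this sketch, not kernel API:

	#include <stdint.h>
	#include <time.h>

	static _Thread_local unsigned int active;
	static _Thread_local uint64_t total_ns, runs;

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* Returns 0 to skip the body (recursion), else a start timestamp. */
	static uint64_t prog_enter(void)
	{
		if (++active != 1)
			return 0;	/* nested invocation on this context */
		return now_ns();
	}

	/* Always called, even when prog_enter() returned 0, so the counter
	 * bumped in prog_enter() is unconditionally balanced here.
	 */
	static void prog_exit(uint64_t start)
	{
		if (start) {		/* only count invocations that ran */
			total_ns += now_ns() - start;
			runs++;
		}
		--active;
	}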
/* in __bpf_prog_enter_lsm_cgroup() */
	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

/* in __bpf_prog_exit_lsm_cgroup() */
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

/* in __bpf_prog_enter_sleepable_recur() */
	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
	...
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {

/* in __bpf_prog_exit_sleepable_recur() */
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
	...
	this_cpu_dec(*(prog->active));

/* in __bpf_prog_enter_sleepable() */
	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

/* in __bpf_prog_exit_sleepable() */
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

/* in __bpf_prog_enter() */
	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

/* in __bpf_prog_exit() */
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

/* in __bpf_tramp_enter() */
	percpu_ref_get(&tr->pcref);

/* in __bpf_tramp_exit() */
	percpu_ref_put(&tr->pcref);
/* in bpf_trampoline_enter() */
	bool sleepable = prog->aux->sleepable;
	...
	    prog->expected_attach_type == BPF_LSM_CGROUP)

/* in bpf_trampoline_exit() */
	bool sleepable = prog->aux->sleepable;
	...
	    prog->expected_attach_type == BPF_LSM_CGROUP)
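bpf_trampoline_enter() and bpf_trampoline_exit() select which of the helper pairs above the JIT wires into a given trampoline, keyed on whether the program needs recursion protection, is sleepable, or is a BPF_LSM_CGROUP attachment. An approximate reconstruction of the enter-side selector (the exit side mirrors it; exact predicates may differ by kernel version):

	bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
	{
		bool sleepable = prog->aux->sleepable;

		if (bpf_prog_check_recur(prog))
			return sleepable ? __bpf_prog_enter_sleepable_recur :
					   __bpf_prog_enter_recur;

		if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
		    prog->expected_attach_type == BPF_LSM_CGROUP)
			return __bpf_prog_enter_lsm_cgroup;

		return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
	}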
/* in arch_prepare_bpf_trampoline() (weak default for arches without a JIT implementation) */
	return -ENOTSUPP;