/openbmc/linux/tools/testing/selftests/bpf/progs/
cpumask_failure.c
     88  cpumask = bpf_kptr_xchg(&v->cpumask, NULL);  in BPF_PROG()
    114  prev = bpf_kptr_xchg(&global_mask, local);  in BPF_PROG()
    148  prev = bpf_kptr_xchg(&global_mask, local);  in BPF_PROG()
    176  prev = bpf_kptr_xchg(&global_mask, curr);  in BPF_PROG()
    183  prev = bpf_kptr_xchg(&global_mask, curr);  in BPF_PROG()
local_kptr_stash.c
     63  res = bpf_kptr_xchg(&mapval->node, res);  in create_and_stash()
     91  res = bpf_kptr_xchg(&mapval->plain, res);  in stash_plain()
    109  res = bpf_kptr_xchg(&mapval->node, NULL);  in unstash_rb_node()
    129  res = bpf_kptr_xchg(&mapval->val, NULL);  in stash_test_ref_kfunc()
map_kptr_fail.c
     71  bpf_kptr_xchg((void *)v + id, NULL);  in non_const_var_off_kptr_xchg()
    215  bpf_kptr_xchg(&v->unref_ptr, NULL);  in reject_kptr_xchg_on_unref()
    269  bpf_kptr_xchg(&v->ref_ptr, p);  in reject_untrusted_xchg()
    289  bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);  in reject_bad_type_xchg()
    308  bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);  in reject_member_of_ref_xchg()
    362  bpf_kptr_xchg(&v->ref_ptr, p);  in kptr_xchg_ref_state()
    381  p = bpf_kptr_xchg(&v->ref_ptr, p);  in kptr_xchg_possibly_null()
map_kptr.c
    155  p = bpf_kptr_xchg(&v->ref_ptr, NULL);  in test_kptr_ref()
    175  p = bpf_kptr_xchg(&v->ref_ptr, p);  in test_kptr_ref()
    313  p = bpf_kptr_xchg(&v->ref_ptr, p);  in test_map_kptr_ref_pre()
    321  p = bpf_kptr_xchg(&v->ref_ptr, NULL);  in test_map_kptr_ref_pre()
    333  p = bpf_kptr_xchg(&v->ref_ptr, p);  in test_map_kptr_ref_pre()
    358  p = bpf_kptr_xchg(&v->ref_ptr, NULL);  in test_map_kptr_ref_post()
    366  p = bpf_kptr_xchg(&v->ref_ptr, p);  in test_map_kptr_ref_post()
test_bpf_ma.c
     61  old = bpf_kptr_xchg(&value->data, new);  in batch_alloc_free()
     75  old = bpf_kptr_xchg(&value->data, NULL);  in batch_alloc_free()
refcounted_kptr.c
    123  n = bpf_kptr_xchg(&mapval->node, n);  in __stash_map_insert_tree()
    216  n = bpf_kptr_xchg(&mapval->node, n);  in __read_from_unstash()
    422  n = bpf_kptr_xchg(&mapval->node, n);  in __stash_map_empty_xchg()
    462  n = bpf_kptr_xchg(&mapval->node, NULL);  in rbtree_wrong_owner_remove_fail_b()
    486  m = bpf_kptr_xchg(&mapval->node, NULL);  in rbtree_wrong_owner_remove_fail_a2()
cgrp_kfunc_failure.c
    147  kptr = bpf_kptr_xchg(&v->cgrp, NULL);  in BPF_PROG()
    231  old = bpf_kptr_xchg(&v->cgrp, acquired);  in BPF_PROG()
jit_probe_mem.c
     21  p = bpf_kptr_xchg(&v, p);  in test_jit_probe_mem()
task_kfunc_failure.c
    142  kptr = bpf_kptr_xchg(&v->task, NULL);  in BPF_PROG()
    218  old = bpf_kptr_xchg(&v->task, acquired);  in BPF_PROG()
local_kptr_stash_fail.c
     59  res = bpf_kptr_xchg(&mapval->node, res);  in stash_rb_nodes()
task_kfunc_common.h
     67  old = bpf_kptr_xchg(&v->task, acquired);  in tasks_kfunc_map_insert()
cgrp_kfunc_common.h
     70  old = bpf_kptr_xchg(&v->cgrp, acquired);  in cgrps_kfunc_map_insert()
cb_refs.c
     61  p = bpf_kptr_xchg(&v->ptr, p);  in leak_prog()
cpumask_success.c
    421  cpumask = bpf_kptr_xchg(&v->cpumask, NULL);  in BPF_PROG()
    442  prev = bpf_kptr_xchg(&global_mask, local);  in BPF_PROG()
cgrp_kfunc_success.c
     96  kptr = bpf_kptr_xchg(&v->cgrp, NULL);  in BPF_PROG()
cpumask_common.h
    112  old = bpf_kptr_xchg(&v->cpumask, mask);  in cpumask_map_insert()
task_kfunc_success.c
    164  kptr = bpf_kptr_xchg(&v->task, NULL);  in BPF_PROG()
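Taken together, the selftest hits above all exercise the same ownership-transfer idiom: acquire a referenced pointer, bpf_kptr_xchg() it into a kptr field of a map value, and release whatever pointer comes back out. A minimal sketch of that idiom for a task kptr follows; the map, program and section names are illustrative, not copied from any single file above (compare task_kfunc_common.h line 67 and task_kfunc_success.c line 164):

    /* Sketch only: mirrors the stash pattern in task_kfunc_common.h,
     * with made-up map/program names.
     */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct map_value {
            struct task_struct __kptr *task;        /* referenced kptr field */
    };

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __type(key, int);
            __type(value, struct map_value);
            __uint(max_entries, 1);
    } task_map SEC(".maps");

    /* kfuncs exported by the kernel */
    struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
    void bpf_task_release(struct task_struct *p) __ksym;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(stash_task, struct task_struct *task, u64 clone_flags)
    {
            struct task_struct *acquired, *old;
            struct map_value *v;
            int key = 0;

            v = bpf_map_lookup_elem(&task_map, &key);
            if (!v)
                    return 0;

            /* Take a reference, then move ownership of it into the map. */
            acquired = bpf_task_acquire(task);
            if (!acquired)
                    return 0;

            old = bpf_kptr_xchg(&v->task, acquired);
            if (old)
                    /* Whatever was stashed before is now ours to release. */
                    bpf_task_release(old);
            return 0;
    }

    char _license[] SEC("license") = "GPL";

Reading the pointer back out is the mirror image: bpf_kptr_xchg(&v->task, NULL) empties the map field and makes the program responsible for releasing the returned pointer, which is exactly the path the *_success.c and *_failure.c programs above probe.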
/openbmc/linux/Documentation/bpf/ |
cpumasks.rst
     84  old = bpf_kptr_xchg(&v->cpumask, mask);
    119  a map, the reference can be removed from the map with bpf_kptr_xchg(), or
    162  * bpf_kptr_xchg() between the bpf_map_lookup_elem()
bpf_design_QA.rst
    325  fields and bpf_kptr_xchg() helper will continue to be supported across kernel
kfuncs.rst
    186  referenced kptr (by invoking bpf_kptr_xchg). If not, the verifier fails the
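The cpumasks.rst and kfuncs.rst lines above describe the other half of the idiom: once a reference lives in a map, swapping NULL into the kptr field pulls it back out and the program must then release it (or stash it somewhere else) before returning. A sketch of that removal path for a struct bpf_cpumask, again with illustrative names (compare cpumask_success.c line 421 above):

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct cpumask_map_value {
            struct bpf_cpumask __kptr *cpumask;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __type(key, u32);
            __type(value, struct cpumask_map_value);
            __uint(max_entries, 1);
    } cpumask_map SEC(".maps");

    void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;

    SEC("tp_btf/task_newtask")
    int BPF_PROG(drop_stashed_cpumask, struct task_struct *task, u64 clone_flags)
    {
            struct cpumask_map_value *v;
            struct bpf_cpumask *mask;
            u32 key = 0;

            v = bpf_map_lookup_elem(&cpumask_map, &key);
            if (!v)
                    return 0;

            /* Swap NULL in: the map entry is emptied and we now own 'mask',
             * so the verifier requires us to release it (or stash it again).
             */
            mask = bpf_kptr_xchg(&v->cpumask, NULL);
            if (mask)
                    bpf_cpumask_release(mask);
            return 0;
    }

    char _license[] SEC("license") = "GPL";

This is also why kfuncs.rst mentions bpf_kptr_xchg at line 186: storing a referenced kptr in a map is one of the ways the verifier accepts a program disposing of the reference, the other being an explicit release kfunc.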
/openbmc/linux/kernel/bpf/ |
helpers.c
   1488  BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)  in BPF_CALL_2()
   1500  .func = bpf_kptr_xchg,
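Finally, the helpers.c hit shows where the helper itself is defined. Its body is essentially a single atomic exchange of the pointer slot inside the map value; all of the type and ownership checking happens in the verifier before the call is ever allowed. A sketch of its shape, reconstructed from the signature above rather than quoted verbatim:

    /* Sketch of kernel/bpf/helpers.c:1488; see the tree for the
     * authoritative version.  By the time this runs, the verifier has
     * already checked that map_value points at a kptr field of the
     * matching BTF type and that ptr is a (possibly NULL) referenced
     * pointer whose ownership is being handed over.
     */
    BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
    {
            unsigned long *kptr = map_value;

            /* Atomically publish the new pointer and hand back the old one. */
            return xchg(kptr, (unsigned long)ptr);
    }

The ".func = bpf_kptr_xchg" hit at line 1500 is where this body is wired into the helper's bpf_func_proto, whose argument and return types are what let the verifier track the reference transfer on both sides of the exchange.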