/openbmc/linux/tools/testing/selftests/bpf/progs/ |
linked_list.c | 24 bpf_spin_lock(lock); in list_push_pop() 33 bpf_spin_lock(lock); in list_push_pop() 43 bpf_spin_lock(lock); in list_push_pop() 49 bpf_spin_lock(lock); in list_push_pop() 60 bpf_spin_lock(lock); in list_push_pop() 64 bpf_spin_lock(lock); in list_push_pop() 76 bpf_spin_lock(lock); in list_push_pop() 84 bpf_spin_lock(lock); in list_push_pop() 121 bpf_spin_lock(lock); in list_push_pop_multiple() 159 bpf_spin_lock(lock); in list_push_pop_multiple() [all …]
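The linked_list tests above all follow the same discipline: each bpf_list push or pop happens inside a bpf_spin_lock/bpf_spin_unlock pair on the lock declared next to that list. Below is a minimal sketch of that pattern, not the test itself; the type and variable names are illustrative, and the kfunc declarations (bpf_obj_new, bpf_list_push_front, bpf_list_pop_front, bpf_obj_drop) are assumed to come from the selftests' bpf_experimental.h.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - __builtin_offsetof(type, member)))
#endif

/* Illustrative element type: the list links through 'node'. */
struct elem {
	struct bpf_list_node node;
	int value;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(elem, node);

SEC("tc")
int list_push_pop_sketch(void *ctx)
{
	struct bpf_list_node *n;
	struct elem *e;

	e = bpf_obj_new(typeof(*e));
	if (!e)
		return 0;
	e->value = 42;

	bpf_spin_lock(&glock);			/* lock must be held for every list op */
	bpf_list_push_front(&ghead, &e->node);
	n = bpf_list_pop_front(&ghead);
	bpf_spin_unlock(&glock);

	if (n) {
		e = container_of(n, struct elem, node);
		bpf_obj_drop(e);		/* popped node is owned again and must be released */
	}
	return 0;
}

char _license[] SEC("license") = "GPL";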
|
verifier_spin_lock.c | 10 struct bpf_spin_lock l; 47 __imm(bpf_spin_lock), in spin_lock_test1_success() 80 __imm(bpf_spin_lock), in lock_test2_direct_ld_st() 114 __imm(bpf_spin_lock), in __flag() 148 __imm(bpf_spin_lock), in __flag() 182 __imm(bpf_spin_lock), in call_within_a_locked_region() 216 __imm(bpf_spin_lock), in spin_lock_test6_missing_unlock() 250 __imm(bpf_spin_lock), in lock_test7_unlock_without_lock() 286 __imm(bpf_spin_lock), in spin_lock_test8_double_lock() 325 __imm(bpf_spin_lock), in spin_lock_test9_different_lock() [all …]
|
rbtree_fail.c | 55 bpf_spin_lock(&glock); in rbtree_api_nolock_remove() 88 bpf_spin_lock(&glock); in rbtree_api_remove_unadded_node() 114 bpf_spin_lock(&glock); in rbtree_api_remove_no_drop() 145 bpf_spin_lock(&glock); in rbtree_api_add_to_multiple_trees() 160 bpf_spin_lock(&glock); in rbtree_api_use_unchecked_remove_retval() 169 bpf_spin_lock(&glock); in rbtree_api_use_unchecked_remove_retval() 190 bpf_spin_lock(&glock); in rbtree_api_add_release_unlock_escape() 194 bpf_spin_lock(&glock); in rbtree_api_add_release_unlock_escape() 211 bpf_spin_lock(&glock); in rbtree_api_first_release_unlock_escape() 220 bpf_spin_lock(&glock); in rbtree_api_first_release_unlock_escape() [all …]
|
refcounted_kptr.c | 87 bpf_spin_lock(lock); in __insert_in_tree_and_list() 96 bpf_spin_lock(lock); in __insert_in_tree_and_list() 130 bpf_spin_lock(lock); in __stash_map_insert_tree() 148 bpf_spin_lock(lock); in __read_from_tree() 181 bpf_spin_lock(lock); in __read_from_list() 380 bpf_spin_lock(&alock); in rbtree_refcounted_node_ref_escapes() 405 bpf_spin_lock(&alock); in rbtree_refcounted_node_ref_escapes_owning_input() 466 bpf_spin_lock(&block); in rbtree_wrong_owner_remove_fail_b() 489 bpf_spin_lock(&lock); in rbtree_wrong_owner_remove_fail_a2() 517 bpf_spin_lock(&lock); in BPF_PROG() [all …]
|
test_spin_lock.c | 10 struct bpf_spin_lock lock; 22 struct bpf_spin_lock lock; 33 struct bpf_spin_lock lock; 70 bpf_spin_lock(&val->lock); in bpf_spin_lock_test() 84 bpf_spin_lock(&q->lock); in bpf_spin_lock_test() 97 bpf_spin_lock(&cls->lock); in bpf_spin_lock_test()
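test_spin_lock.c keeps the lock embedded in a map value and takes it around a short read-modify-write. A minimal sketch of that pattern follows; the hash map and struct names are illustrative, not the ones used by the test.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct hmap_elem {
	struct bpf_spin_lock lock;
	int cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");

SEC("tc")
int spin_lock_counter(void *ctx)
{
	int key = 0;
	struct hmap_elem *val = bpf_map_lookup_elem(&hmap, &key);

	if (!val)
		return 0;

	bpf_spin_lock(&val->lock);
	/* Keep the critical section short: no helper calls and no direct
	 * access to the lock word itself; the verifier also insists on
	 * exactly one matching unlock on every path out (the missing-unlock,
	 * double-lock and different-lock cases in verifier_spin_lock.c are
	 * the rejected variants). */
	val->cnt++;
	bpf_spin_unlock(&val->lock);
	return 0;
}

char _license[] SEC("license") = "GPL";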
|
rbtree.c | 21 private(A) struct bpf_spin_lock glock; 36 static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock) in __add_three() 52 bpf_spin_lock(&glock); in __add_three() 62 bpf_spin_lock(&glock); in __add_three() 90 bpf_spin_lock(&glock); in rbtree_add_and_remove() 136 bpf_spin_lock(&glock); in rbtree_first_and_remove() 160 bpf_spin_lock(&glock); in rbtree_first_and_remove() 192 bpf_spin_lock(&glock); in rbtree_api_release_aliasing() 196 bpf_spin_lock(&glock); in rbtree_api_release_aliasing()
|
linked_list_fail.c | 364 bpf_spin_lock(&glock); in use_after_unlock() 395 bpf_spin_lock(&glock); in list_double_add() 428 bpf_spin_lock(&glock); in no_node_value_type() 443 bpf_spin_lock(&glock); in incorrect_value_type() 458 bpf_spin_lock(&glock); in incorrect_node_var_off() 473 bpf_spin_lock(&glock); in incorrect_node_off1() 488 bpf_spin_lock(&glock); in incorrect_node_off2() 503 bpf_spin_lock(&glock); in no_head_type() 505 bpf_spin_lock(&glock); in no_head_type() 518 bpf_spin_lock(&glock); in incorrect_head_var_off1() [all …]
|
test_spin_lock_fail.c | 8 struct bpf_spin_lock lock; 31 SEC(".data.A") struct bpf_spin_lock lockA; 32 SEC(".data.B") struct bpf_spin_lock lockB; 108 bpf_spin_lock(A); \ 136 bpf_spin_lock(&f1->lock); in lock_id_mismatch_mapval_mapval() 164 bpf_spin_lock(&f1->lock); in lock_id_mismatch_innermapval_innermapval1() 191 bpf_spin_lock(&f1->lock); in lock_id_mismatch_innermapval_innermapval2()
|
linked_list.h | 17 struct bpf_spin_lock lock; 23 struct bpf_spin_lock lock; 52 private(A) struct bpf_spin_lock glock; 54 private(B) struct bpf_spin_lock glock2;
|
test_map_lock.c | 10 struct bpf_spin_lock lock; 22 struct bpf_spin_lock lock; 45 bpf_spin_lock(&val->lock); in bpf_map_lock_test() 54 bpf_spin_lock(&q->lock); in bpf_map_lock_test()
|
verifier_helper_restricted.c | 10 struct bpf_spin_lock l; 205 __imm(bpf_spin_lock), in in_bpf_prog_type_kprobe_3() 228 __imm(bpf_spin_lock), in in_bpf_prog_type_tracepoint_3() 251 __imm(bpf_spin_lock), in bpf_prog_type_perf_event_3() 274 __imm(bpf_spin_lock), in bpf_prog_type_raw_tracepoint_3()
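verifier_helper_restricted.c covers the program-type side of the contract: bpf_spin_lock is refused in tracing program types (kprobe, tracepoint, perf_event, raw_tracepoint). A hedged illustration of the shape these tests expect the verifier to reject; the map, struct, and attach-point names are mine, not from the test.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Illustrative map-value type with an embedded lock. */
struct locked_val {
	struct bpf_spin_lock l;
	long cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct locked_val);
} lock_map SEC(".maps");

SEC("kprobe/do_nanosleep")
int rejected_in_kprobe(void *ctx)
{
	int key = 0;
	struct locked_val *v = bpf_map_lookup_elem(&lock_map, &key);

	if (!v)
		return 0;
	/* The verifier does not allow bpf_spin_lock in tracing program
	 * types, so a program shaped like this fails to load. */
	bpf_spin_lock(&v->l);
	v->cnt++;
	bpf_spin_unlock(&v->l);
	return 0;
}

char _license[] SEC("license") = "GPL";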
|
refcounted_kptr_fail.c | 20 private(A) struct bpf_spin_lock glock; 44 bpf_spin_lock(&glock); in rbtree_refcounted_node_ref_escapes() 70 bpf_spin_lock(&glock); in rbtree_refcounted_node_ref_escapes_owning_input() 91 bpf_spin_lock(&glock); in BPF_PROG()
|
rbtree_btf_fail__wrong_node_type.c | 20 private(A) struct bpf_spin_lock glock; 32 bpf_spin_lock(&glock); in rbtree_api_add__wrong_node_type()
|
freplace_attach_probe.c | 12 struct bpf_spin_lock lock; 33 bpf_spin_lock(&val->lock); in new_handle_kprobe()
|
rbtree_btf_fail__add_wrong_type.c | 34 private(A) struct bpf_spin_lock glock; 46 bpf_spin_lock(&glock); in rbtree_api_add__add_wrong_type()
|
test_helper_restricted.c | 11 struct bpf_spin_lock l; 54 bpf_spin_lock(&lock->l); in spin_lock_work()
|
htab_reuse.c | 9 struct bpf_spin_lock lock;
|
timer_crash.c | 9 struct bpf_spin_lock lock;
|
test_sock_fields.c | 27 struct bpf_spin_lock lock; 210 bpf_spin_lock(&pkt_out_cnt10->lock); in egress_read_sock_fields()
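In test_sock_fields.c the lock lives in socket-local storage and guards a per-socket packet counter. A simplified sketch of that arrangement follows; the map, struct, and program names are illustrative rather than the ones in the test.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct pkt_cnt {
	struct bpf_spin_lock lock;
	__u64 cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct pkt_cnt);
} sk_pkt_cnt SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct pkt_cnt *c;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);
	if (!sk)
		return 1;

	c = bpf_sk_storage_get(&sk_pkt_cnt, sk, NULL, BPF_SK_STORAGE_GET_F_CREATE);
	if (!c)
		return 1;

	bpf_spin_lock(&c->lock);	/* serialize against other CPUs touching this socket */
	c->cnt++;
	bpf_spin_unlock(&c->lock);
	return 1;
}

char _license[] SEC("license") = "GPL";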
|
timer.c | 13 struct bpf_spin_lock lock; /* unused */
|
/openbmc/linux/samples/bpf/ |
hbm.h | 12 struct bpf_spin_lock lock;
|
hbm_edt_kern.c | 92 bpf_spin_lock(&qdp->lock); in _hbm_out_cg()
|
hbm_out_kern.c | 94 bpf_spin_lock(&qdp->lock); in _hbm_out_cg()
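Both hbm programs take the lock around their rate-limiting state so that concurrent packets in the same cgroup see a consistent credit value. Below is a much-simplified sketch of that token-bucket update; the struct fields, storage map, constant, and initialization handling are illustrative, not the actual hbm definitions.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Illustrative per-cgroup queue state; the real hbm_vqueue lives in hbm.h. */
struct vqueue {
	struct bpf_spin_lock lock;
	long long credit;		/* bytes currently allowed to send */
	unsigned long long last_ns;	/* last time credit was replenished */
};

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, struct vqueue);
} queue_state SEC(".maps");

#define RATE_BYTES_PER_NS 1	/* illustration only (~1 GB/s) */

SEC("cgroup_skb/egress")
int hbm_sketch(struct __sk_buff *skb)
{
	struct vqueue *q = bpf_get_local_storage(&queue_state, 0);
	unsigned long long now = bpf_ktime_get_ns();
	unsigned int len = skb->len;
	int allow = 1;

	/* Initialization and credit clamping are omitted; the point is that
	 * the whole read-modify-write of the shared state sits inside the
	 * lock, and no helpers are called while it is held. */
	bpf_spin_lock(&q->lock);
	q->credit += (long long)(now - q->last_ns) * RATE_BYTES_PER_NS;
	q->last_ns = now;
	if (q->credit >= len)
		q->credit -= len;
	else
		allow = 0;		/* over rate: drop */
	bpf_spin_unlock(&q->lock);

	return allow;
}

char _license[] SEC("license") = "GPL";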
|
/openbmc/linux/Documentation/bpf/ |
graph_ds_impl.rst | 70 struct bpf_spin_lock glock; 74 which also contains a ``bpf_spin_lock`` - in the above example both global 92 bpf_spin_lock(&lock); 125 bpf_spin_lock(&lock); 136 * Graph data structure APIs can only be used when the ``bpf_spin_lock`` 146 Because the associated ``bpf_spin_lock`` must be held by any program adding 217 bpf_spin_lock(&lock);
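The document spells out the rule the selftests above exercise: the bpf_spin_lock associated with a bpf_rb_root (or bpf_list_head) must be held across every add/remove/first call, and the verifier rejects programs that break this. A condensed sketch of the rbtree flavour follows; names are illustrative, and the kfunc declarations are assumed to come from the selftests' bpf_experimental.h.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - __builtin_offsetof(type, member)))
#endif

struct node_data {
	struct bpf_rb_node node;
	long key;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na = container_of(a, struct node_data, node);
	struct node_data *nb = container_of(b, struct node_data, node);

	return na->key < nb->key;
}

SEC("tc")
int rbtree_add_sketch(void *ctx)
{
	struct node_data *n = bpf_obj_new(typeof(*n));

	if (!n)
		return 0;
	n->key = 17;

	bpf_spin_lock(&glock);		/* must be held around every rbtree kfunc */
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);
	return 0;
}

char _license[] SEC("license") = "GPL";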
|
/openbmc/linux/kernel/bpf/ |
helpers.c | 281 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) in __bpf_spin_lock() 296 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) in __bpf_spin_unlock() 306 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) in __bpf_spin_lock() 316 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) in __bpf_spin_unlock() 336 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) in NOTRACE_BPF_CALL_1() argument 343 .func = bpf_spin_lock, 359 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) in NOTRACE_BPF_CALL_1() argument 376 struct bpf_spin_lock *lock; in copy_map_value_locked() 1126 struct bpf_spin_lock lock; 1902 struct bpf_spin_lock *spin_lock) in bpf_list_head_free() [all …]
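copy_map_value_locked() is what backs the BPF_F_LOCK map flags from userspace: the kernel takes the map value's embedded bpf_spin_lock while copying, and the lock word itself is never copied. A small userspace sketch of that interface; map_fd and struct map_val are assumptions for illustration, the libbpf calls are the standard ones.

#include <bpf/bpf.h>

/* Illustrative value layout: BPF_F_LOCK requires the map's BTF to describe a
 * value containing exactly one struct bpf_spin_lock. */
struct map_val {
	struct bpf_spin_lock lock;	/* taken by the kernel, never copied */
	long cnt;
};

int read_locked(int map_fd, int key, struct map_val *out)
{
	/* Copies the value out while the kernel holds the embedded lock. */
	return bpf_map_lookup_elem_flags(map_fd, &key, out, BPF_F_LOCK);
}

int write_locked(int map_fd, int key, const struct map_val *in)
{
	/* Updates everything except the lock word, under the same lock. */
	return bpf_map_update_elem(map_fd, &key, in, BPF_F_LOCK);
}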
|