Searched refs:skel (Results 1 – 25 of 266) sorted by relevance

/openbmc/linux/tools/testing/selftests/bpf/prog_tests/
attach_probe.c
43 struct test_attach_probe_manual *skel; in test_attach_probe_manual() local
46 skel = test_attach_probe_manual__open_and_load(); in test_attach_probe_manual()
47 if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) in test_attach_probe_manual()
57 kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in test_attach_probe_manual()
62 skel->links.handle_kprobe = kprobe_link; in test_attach_probe_manual()
65 kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in test_attach_probe_manual()
70 skel->links.handle_kretprobe = kretprobe_link; in test_attach_probe_manual()
76 uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, in test_attach_probe_manual()
83 skel->links.handle_uprobe = uprobe_link; in test_attach_probe_manual()
86 uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, in test_attach_probe_manual()
[all …]
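
The truncated excerpt shows the manual attach pattern: the skeleton is loaded, individual programs are attached with bpf_program__attach_kprobe_opts(), and each returned link is stored back into skel->links so the skeleton destructor detaches it. A minimal sketch of that pattern (the skeleton header name and the probed symbol are assumptions, not taken from the excerpt):

#include <bpf/libbpf.h>
#include "test_attach_probe_manual.skel.h"  /* generated by bpftool gen skeleton (assumed name) */

static int attach_probes(struct test_attach_probe_manual *skel, const char *sym)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link;

	/* entry probe on the given kernel symbol */
	link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, sym, &opts);
	if (!link)
		return -1;
	/* hand the link to the skeleton so __destroy() detaches it */
	skel->links.handle_kprobe = link;

	/* the same call with .retprobe set attaches the return probe */
	opts.retprobe = true;
	link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, sym, &opts);
	if (!link)
		return -1;
	skel->links.handle_kretprobe = link;
	return 0;
}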
tracing_struct.c
9 struct tracing_struct *skel; in test_fentry() local
12 skel = tracing_struct__open_and_load(); in test_fentry()
13 if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load")) in test_fentry()
16 err = tracing_struct__attach(skel); in test_fentry()
22 ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a"); in test_fentry()
23 ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b"); in test_fentry()
24 ASSERT_EQ(skel->bss->t1_b, 1, "t1:b"); in test_fentry()
25 ASSERT_EQ(skel->bss->t1_c, 4, "t1:c"); in test_fentry()
27 ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs"); in test_fentry()
28 ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0"); in test_fentry()
[all …]
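
tracing_struct.c follows the most common selftest shape: open and load the skeleton, attach every program it declares, trigger the traced code, then read the results the BPF side wrote into globals via skel->bss. A condensed sketch of that lifecycle (the header name and the __destroy()/error-handling details are assumed; the t1_a_a value is from the excerpt):

#include <bpf/libbpf.h>
#include "tracing_struct.skel.h"  /* generated skeleton header (assumed name) */

int main(void)
{
	struct tracing_struct *skel;
	int err = 1;

	skel = tracing_struct__open_and_load();
	if (!skel)
		return 1;

	/* attach all programs declared in the BPF object */
	if (tracing_struct__attach(skel))
		goto out;

	/* ... trigger the fentry/fexit hooks here ... */

	/* BPF-side globals live in the .bss map and are mirrored at skel->bss */
	err = skel->bss->t1_a_a == 2 ? 0 : 1;
out:
	tracing_struct__destroy(skel);
	return err;
}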
atomics.c
7 static void test_add(struct atomics_lskel *skel) in test_add() argument
13 prog_fd = skel->progs.add.prog_fd; in test_add()
20 ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); in test_add()
21 ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); in test_add()
23 ASSERT_EQ(skel->data->add32_value, 3, "add32_value"); in test_add()
24 ASSERT_EQ(skel->bss->add32_result, 1, "add32_result"); in test_add()
26 ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value"); in test_add()
27 ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); in test_add()
29 ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); in test_add()
32 static void test_sub(struct atomics_lskel *skel) in test_sub() argument
[all …]
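
atomics.c uses a light skeleton (atomics_lskel), where each program exposes a raw prog_fd instead of a struct bpf_program *. The run call itself is elided in the excerpt; a sketch of how such a program is typically driven with bpf_prog_test_run_opts(), assuming the generated atomics.lskel.h header and constructor names:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "atomics.lskel.h"  /* light skeleton, bpftool gen skeleton -L (assumed name) */

int main(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct atomics_lskel *skel;
	int err = 1;

	skel = atomics_lskel__open_and_load();
	if (!skel)
		return 1;

	/* light skeletons hand out raw program FDs */
	if (bpf_prog_test_run_opts(skel->progs.add.prog_fd, &topts))
		goto out;

	/* the program's writes are visible through the mapped .data/.bss sections */
	err = (skel->data->add64_value == 3 && skel->bss->add64_result == 1) ? 0 : 1;
out:
	atomics_lskel__destroy(skel);
	return err;
}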
test_strncmp.c
6 static int trigger_strncmp(const struct strncmp_test *skel) in trigger_strncmp() argument
12 cmp = skel->bss->cmp_ret; in trigger_strncmp()
24 static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name, in strncmp_full_str_cmp() argument
27 size_t nr = sizeof(skel->bss->str); in strncmp_full_str_cmp()
28 char *str = skel->bss->str; in strncmp_full_str_cmp()
33 memcpy(str, skel->rodata->target, nr); in strncmp_full_str_cmp()
37 got = trigger_strncmp(skel); in strncmp_full_str_cmp()
46 struct strncmp_test *skel; in test_strncmp_ret() local
49 skel = strncmp_test__open(); in test_strncmp_ret()
50 if (!ASSERT_OK_PTR(skel, "strncmp_test open")) in test_strncmp_ret()
[all …]
bpf_loop.c
8 static void check_nr_loops(struct bpf_loop *skel) in check_nr_loops() argument
12 link = bpf_program__attach(skel->progs.test_prog); in check_nr_loops()
17 skel->bss->nr_loops = 0; in check_nr_loops()
21 ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, in check_nr_loops()
25 skel->bss->nr_loops = 500; in check_nr_loops()
29 ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, in check_nr_loops()
31 ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output"); in check_nr_loops()
34 skel->bss->nr_loops = -1; in check_nr_loops()
38 ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit"); in check_nr_loops()
43 static void check_callback_fn_stop(struct bpf_loop *skel) in check_callback_fn_stop() argument
[all …]
bpf_iter.c
41 struct bpf_iter_test_kern3 *skel; in test_btf_id_or_null() local
43 skel = bpf_iter_test_kern3__open_and_load(); in test_btf_id_or_null()
44 if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { in test_btf_id_or_null()
45 bpf_iter_test_kern3__destroy(skel); in test_btf_id_or_null()
80 static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog, in do_read_map_iter_fd() argument
105 bpf_object__destroy_skeleton(*skel); in do_read_map_iter_fd()
106 *skel = NULL; in do_read_map_iter_fd()
142 struct bpf_iter_ipv6_route *skel; in test_ipv6_route() local
144 skel = bpf_iter_ipv6_route__open_and_load(); in test_ipv6_route()
145 if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) in test_ipv6_route()
[all …]
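
bpf_iter.c drives BPF iterators: the iterator program is attached with bpf_program__attach_iter(), an iterator instance is created from the link with bpf_iter_create(), and reading the resulting FD produces the dump. A condensed sketch of that read loop (the dump_ipv6_route program name and the buffer handling are assumptions; the skeleton name is from the excerpt):

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_iter_ipv6_route.skel.h"  /* generated skeleton header (assumed name) */

static int dump_ipv6_routes(struct bpf_iter_ipv6_route *skel)
{
	struct bpf_link *link;
	char buf[4096];
	int iter_fd, len;

	link = bpf_program__attach_iter(skel->progs.dump_ipv6_route, NULL);
	if (!link)
		return -1;

	/* each bpf_iter_create() call starts an independent walk of the kernel objects */
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -1;
	}

	/* the iterator program's bpf_seq_printf() output comes back through read() */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;

	close(iter_fd);
	bpf_link__destroy(link);
	return len < 0 ? -1 : 0;
}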
cgrp_local_storage.c
24 struct cgrp_ls_tp_btf *skel; in test_tp_btf() local
28 skel = cgrp_ls_tp_btf__open_and_load(); in test_tp_btf()
29 if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) in test_tp_btf()
33 err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY); in test_tp_btf()
38 err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2); in test_tp_btf()
45 err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd); in test_tp_btf()
49 skel->bss->target_pid = syscall(SYS_gettid); in test_tp_btf()
51 err = cgrp_ls_tp_btf__attach(skel); in test_tp_btf()
58 skel->bss->target_pid = 0; in test_tp_btf()
61 ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); in test_tp_btf()
[all …]
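
cgrp_local_storage.c manipulates cgroup local storage from user space through the plain map syscalls, using bpf_map__fd() to get the FD of a skeleton map. A sketch of that update/lookup/delete sequence (the value type and header name are assumptions; map_b and the cgroup-FD key are from the excerpt):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgrp_ls_tp_btf.skel.h"  /* generated skeleton header (assumed name) */

static int poke_cgrp_storage(struct cgrp_ls_tp_btf *skel, int cgroup_fd)
{
	long val1 = 1, val2 = 0;   /* must match the map's value size (assumed long) */
	int map_fd = bpf_map__fd(skel->maps.map_b);

	/* write, read back, then delete one element keyed by the cgroup FD */
	if (bpf_map_update_elem(map_fd, &cgroup_fd, &val1, BPF_ANY))
		return -1;
	if (bpf_map_lookup_elem(map_fd, &cgroup_fd, &val2) || val2 != val1)
		return -1;
	return bpf_map_delete_elem(map_fd, &cgroup_fd);
}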
map_ops.c
46 static int setup(struct test_map_ops **skel) in setup() argument
50 if (!skel) in setup()
53 *skel = test_map_ops__open(); in setup()
54 if (!ASSERT_OK_PTR(*skel, "test_map_ops__open")) in setup()
57 (*skel)->rodata->pid = getpid(); in setup()
59 err = test_map_ops__load(*skel); in setup()
63 err = test_map_ops__attach(*skel); in setup()
70 static void teardown(struct test_map_ops **skel) in teardown() argument
72 if (skel && *skel) in teardown()
73 test_map_ops__destroy(*skel); in teardown()
[all …]
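
map_ops.c splits skeleton setup into explicit open/load/attach steps so that read-only data (the filtering PID) can be set while it is still writable, i.e. after __open() but before __load(). A sketch of that setup/teardown pair, assuming the generated test_map_ops.skel.h header:

#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_map_ops.skel.h"  /* generated skeleton header (assumed name) */

static int setup(struct test_map_ops **skel)
{
	*skel = test_map_ops__open();
	if (!*skel)
		return -1;

	/* .rodata is only writable between open() and load() */
	(*skel)->rodata->pid = getpid();

	if (test_map_ops__load(*skel) || test_map_ops__attach(*skel)) {
		test_map_ops__destroy(*skel);
		*skel = NULL;
		return -1;
	}
	return 0;
}

static void teardown(struct test_map_ops **skel)
{
	if (skel && *skel) {
		test_map_ops__destroy(*skel);
		*skel = NULL;
	}
}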
tc_links.c
22 struct test_tc_link *skel; in serial_test_tc_links_basic() local
26 skel = test_tc_link__open_and_load(); in serial_test_tc_links_basic()
27 if (!ASSERT_OK_PTR(skel, "skel_load")) in serial_test_tc_links_basic()
30 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); in serial_test_tc_links_basic()
31 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); in serial_test_tc_links_basic()
38 ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); in serial_test_tc_links_basic()
39 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); in serial_test_tc_links_basic()
41 link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); in serial_test_tc_links_basic()
45 skel->links.tc1 = link; in serial_test_tc_links_basic()
47 lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); in serial_test_tc_links_basic()
[all …]
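
tc_links.c exercises the tcx attach API: a TC program is attached to an interface with bpf_program__attach_tcx() and the returned link is parked in skel->links so the skeleton owns its lifetime. A sketch of that attach step (resolving the loopback ifindex with if_nametoindex() is an assumption; tc1 and optl are from the excerpt):

#include <net/if.h>
#include <bpf/libbpf.h>
#include "test_tc_link.skel.h"  /* generated skeleton header (assumed name) */

static int attach_tc1(struct test_tc_link *skel)
{
	LIBBPF_OPTS(bpf_tcx_opts, optl);  /* ordering/revision options left at defaults */
	struct bpf_link *link;
	int loopback = if_nametoindex("lo");

	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
	if (!link)
		return -1;

	/* owned by the skeleton from here on; __destroy() detaches it */
	skel->links.tc1 = link;
	return 0;
}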
find_vma.c
10 static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test) in test_and_reset_skel() argument
13 ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); in test_and_reset_skel()
14 ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); in test_and_reset_skel()
15 ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); in test_and_reset_skel()
16 ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); in test_and_reset_skel()
19 skel->bss->found_vm_exec = 0; in test_and_reset_skel()
20 skel->data->find_addr_ret = -1; in test_and_reset_skel()
21 skel->data->find_zero_ret = -1; in test_and_reset_skel()
22 skel->bss->d_iname[0] = 0; in test_and_reset_skel()
41 static bool find_vma_pe_condition(struct find_vma *skel) in find_vma_pe_condition() argument
[all …]
bpf_cookie.c
22 static void kprobe_subtest(struct test_bpf_cookie *skel) in kprobe_subtest() argument
31 link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in kprobe_subtest()
38 link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, in kprobe_subtest()
46 retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in kprobe_subtest()
53 retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, in kprobe_subtest()
61 ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res"); in kprobe_subtest()
62 ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res"); in kprobe_subtest()
71 static void kprobe_multi_test_run(struct kprobe_multi *skel) in kprobe_multi_test_run() argument
76 prog_fd = bpf_program__fd(skel->progs.trigger); in kprobe_multi_test_run()
81 ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); in kprobe_multi_test_run()
[all …]
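
bpf_cookie.c attaches the same kprobe program several times, each time with a different .bpf_cookie in bpf_kprobe_opts, then checks that the program observed the OR of the cookies it ran with (0x1 | 0x2 in the excerpt). A sketch of attaching one program twice with distinct cookies (the probed symbol is an assumption):

#include <bpf/libbpf.h>
#include "test_bpf_cookie.skel.h"  /* generated skeleton header (assumed name) */

static int attach_with_cookies(struct test_bpf_cookie *skel, const char *sym)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1, *link2;

	/* two attachments of one program, told apart via bpf_get_attach_cookie() on the BPF side */
	opts.bpf_cookie = 0x1;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, sym, &opts);
	if (!link1)
		return -1;

	opts.bpf_cookie = 0x2;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, sym, &opts);
	if (!link2) {
		bpf_link__destroy(link1);
		return -1;
	}

	/* ... trigger sym, then expect skel->bss->kprobe_res == (0x1 | 0x2) ... */
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	return 0;
}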
btf_tag.c
17 struct test_btf_decl_tag *skel; in test_btf_decl_tag() local
19 skel = test_btf_decl_tag__open_and_load(); in test_btf_decl_tag()
20 if (!ASSERT_OK_PTR(skel, "btf_decl_tag")) in test_btf_decl_tag()
23 if (skel->rodata->skip_tests) { in test_btf_decl_tag()
28 test_btf_decl_tag__destroy(skel); in test_btf_decl_tag()
33 struct btf_type_tag *skel; in test_btf_type_tag() local
35 skel = btf_type_tag__open_and_load(); in test_btf_type_tag()
36 if (!ASSERT_OK_PTR(skel, "btf_type_tag")) in test_btf_type_tag()
39 if (skel->rodata->skip_tests) { in test_btf_type_tag()
44 btf_type_tag__destroy(skel); in test_btf_type_tag()
[all …]
global_map_resize.c
22 struct test_global_map_resize *skel; in global_map_resize_bss_subtest() local
24 const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2; in global_map_resize_bss_subtest()
27 skel = test_global_map_resize__open(); in global_map_resize_bss_subtest()
28 if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) in global_map_resize_bss_subtest()
35 skel->bss->array[0] = 1; in global_map_resize_bss_subtest()
38 map = skel->maps.bss; in global_map_resize_bss_subtest()
45 new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus(); in global_map_resize_bss_subtest()
46 err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz); in global_map_resize_bss_subtest()
50 array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]); in global_map_resize_bss_subtest()
54 skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz); in global_map_resize_bss_subtest()
[all …]
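
global_map_resize.c resizes a global-data map before load: bpf_map__set_value_size() grows the .bss map, and bpf_map__initial_value() re-fetches the possibly re-mmapped pointer that skel->bss normally points at. A sketch of the bss-resize step from the excerpt, assuming the generated test_global_map_resize.skel.h header:

#include <unistd.h>
#include <bpf/libbpf.h>
#include "test_global_map_resize.skel.h"  /* generated skeleton header (assumed name) */

static int resize_bss(struct test_global_map_resize *skel)
{
	size_t new_sz, actual_sz;

	/* grow .bss by two pages; only legal before the skeleton is loaded */
	new_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
	if (bpf_map__set_value_size(skel->maps.bss, new_sz))
		return -1;

	/* the mapped region may move, so refresh the skeleton's view of it */
	skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
	return skel->bss ? 0 : -1;
}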
check_mtu.c
43 struct test_check_mtu *skel; in test_check_mtu_xdp_attach() local
49 skel = test_check_mtu__open_and_load(); in test_check_mtu_xdp_attach()
50 if (CHECK(!skel, "open and load skel", "failed")) in test_check_mtu_xdp_attach()
53 prog = skel->progs.xdp_use_helper_basic; in test_check_mtu_xdp_attach()
58 skel->links.xdp_use_helper_basic = link; in test_check_mtu_xdp_attach()
75 test_check_mtu__destroy(skel); in test_check_mtu_xdp_attach()
78 static void test_check_mtu_run_xdp(struct test_check_mtu *skel, in test_check_mtu_run_xdp() argument
99 mtu_result = skel->bss->global_bpf_mtu_xdp; in test_check_mtu_run_xdp()
106 struct test_check_mtu *skel; in test_check_mtu_xdp() local
109 skel = test_check_mtu__open(); in test_check_mtu_xdp()
[all …]
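
check_mtu.c attaches its XDP program through a bpf_link and parks the link in skel->links so that test_check_mtu__destroy() detaches it; the exact attach call is elided in the excerpt. A minimal sketch of that step, assuming bpf_program__attach_xdp() on the loopback interface:

#include <net/if.h>
#include <bpf/libbpf.h>
#include "test_check_mtu.skel.h"  /* generated skeleton header (assumed name) */

static int attach_xdp(struct test_check_mtu *skel)
{
	struct bpf_link *link;
	int ifindex = if_nametoindex("lo");

	link = bpf_program__attach_xdp(skel->progs.xdp_use_helper_basic, ifindex);
	if (!link)
		return -1;

	/* owned by the skeleton; __destroy() detaches it */
	skel->links.xdp_use_helper_basic = link;
	return 0;
}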
rcu_read_lock.c
17 struct rcu_read_lock *skel; in test_success() local
20 skel = rcu_read_lock__open(); in test_success()
21 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_success()
24 skel->bss->target_pid = syscall(SYS_gettid); in test_success()
26 bpf_program__set_autoload(skel->progs.get_cgroup_id, true); in test_success()
27 bpf_program__set_autoload(skel->progs.task_succ, true); in test_success()
28 bpf_program__set_autoload(skel->progs.two_regions, true); in test_success()
29 bpf_program__set_autoload(skel->progs.non_sleepable_1, true); in test_success()
30 bpf_program__set_autoload(skel->progs.non_sleepable_2, true); in test_success()
31 bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true); in test_success()
[all …]
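
rcu_read_lock.c keeps its programs out of the load by default and opts individual ones back in with bpf_program__set_autoload() between open and load, so success and failure cases can share one BPF object. A sketch of that selective-load pattern (the __load()/__destroy() helper names are the usual generated ones, assumed here; the program names are from the excerpt):

#include <bpf/libbpf.h>
#include "rcu_read_lock.skel.h"  /* generated skeleton header (assumed name) */

static struct rcu_read_lock *open_and_load_subset(void)
{
	struct rcu_read_lock *skel;

	skel = rcu_read_lock__open();
	if (!skel)
		return NULL;

	/* only the selected programs get loaded and verified */
	bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
	bpf_program__set_autoload(skel->progs.two_regions, true);

	if (rcu_read_lock__load(skel)) {
		rcu_read_lock__destroy(skel);
		return NULL;
	}
	return skel;
}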
test_ldsx_insn.c
10 struct test_ldsx_insn *skel; in test_map_val_and_probed_memory() local
13 skel = test_ldsx_insn__open(); in test_map_val_and_probed_memory()
14 if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) in test_map_val_and_probed_memory()
17 if (skel->rodata->skip) { in test_map_val_and_probed_memory()
22 bpf_program__set_autoload(skel->progs.rdonly_map_prog, true); in test_map_val_and_probed_memory()
23 bpf_program__set_autoload(skel->progs.map_val_prog, true); in test_map_val_and_probed_memory()
24 bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true); in test_map_val_and_probed_memory()
26 err = test_ldsx_insn__load(skel); in test_map_val_and_probed_memory()
30 err = test_ldsx_insn__attach(skel); in test_map_val_and_probed_memory()
36 ASSERT_EQ(skel->bss->done1, 1, "done1"); in test_map_val_and_probed_memory()
[all …]
test_bpf_syscall_macro.c
11 struct bpf_syscall_macro *skel = NULL; in test_bpf_syscall_macro() local
22 skel = bpf_syscall_macro__open(); in test_bpf_syscall_macro()
23 if (!ASSERT_OK_PTR(skel, "bpf_syscall_macro__open")) in test_bpf_syscall_macro()
26 skel->rodata->filter_pid = getpid(); in test_bpf_syscall_macro()
29 err = bpf_syscall_macro__load(skel); in test_bpf_syscall_macro()
34 err = bpf_syscall_macro__attach(skel); in test_bpf_syscall_macro()
42 ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); in test_bpf_syscall_macro()
44 ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); in test_bpf_syscall_macro()
46 ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2"); in test_bpf_syscall_macro()
47 ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3"); in test_bpf_syscall_macro()
[all …]
get_func_ip_test.c
12 struct get_func_ip_test *skel = NULL; in test_function_entry() local
16 skel = get_func_ip_test__open(); in test_function_entry()
17 if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) in test_function_entry()
20 err = get_func_ip_test__load(skel); in test_function_entry()
24 err = get_func_ip_test__attach(skel); in test_function_entry()
28 skel->bss->uprobe_trigger = (unsigned long) uprobe_trigger; in test_function_entry()
30 prog_fd = bpf_program__fd(skel->progs.test1); in test_function_entry()
35 prog_fd = bpf_program__fd(skel->progs.test5); in test_function_entry()
42 ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); in test_function_entry()
43 ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); in test_function_entry()
[all …]
uprobe_multi_test.c
92 static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) in uprobe_multi_test_run() argument
94 skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; in uprobe_multi_test_run()
95 skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; in uprobe_multi_test_run()
96 skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; in uprobe_multi_test_run()
98 skel->bss->user_ptr = test_data; in uprobe_multi_test_run()
105 skel->bss->pid = child ? 0 : getpid(); in uprobe_multi_test_run()
119 ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); in uprobe_multi_test_run()
120 ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); in uprobe_multi_test_run()
121 ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result"); in uprobe_multi_test_run()
123 ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result"); in uprobe_multi_test_run()
[all …]
type_cast.c
9 struct type_cast *skel; in test_xdp() local
21 skel = type_cast__open(); in test_xdp()
22 if (!ASSERT_OK_PTR(skel, "skel_open")) in test_xdp()
25 bpf_program__set_autoload(skel->progs.md_xdp, true); in test_xdp()
26 err = type_cast__load(skel); in test_xdp()
30 prog_fd = bpf_program__fd(skel->progs.md_xdp); in test_xdp()
35 ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex"); in test_xdp()
36 ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex"); in test_xdp()
37 ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name"); in test_xdp()
38 ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum"); in test_xdp()
[all …]
/openbmc/linux/samples/v4l/
v4l2-pci-skeleton.c
121 struct skeleton *skel = dev_id; in skeleton_irq() local
128 spin_lock(&skel->qlock); in skeleton_irq()
130 spin_unlock(&skel->qlock); in skeleton_irq()
132 new_buf->vb.sequence = skel->sequence++; in skeleton_irq()
133 new_buf->vb.field = skel->field; in skeleton_irq()
134 if (skel->format.field == V4L2_FIELD_ALTERNATE) { in skeleton_irq()
135 if (skel->field == V4L2_FIELD_BOTTOM) in skeleton_irq()
136 skel->field = V4L2_FIELD_TOP; in skeleton_irq()
137 else if (skel->field == V4L2_FIELD_TOP) in skeleton_irq()
138 skel->field = V4L2_FIELD_BOTTOM; in skeleton_irq()
[all …]
/openbmc/linux/kernel/bpf/preload/iterators/
iterators.lskel-big-endian.h
24 iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_map__attach() argument
26 int prog_fd = skel->progs.dump_bpf_map.prog_fd; in iterators_bpf__dump_bpf_map__attach()
30 skel->links.dump_bpf_map_fd = fd; in iterators_bpf__dump_bpf_map__attach()
35 iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_prog__attach() argument
37 int prog_fd = skel->progs.dump_bpf_prog.prog_fd; in iterators_bpf__dump_bpf_prog__attach()
41 skel->links.dump_bpf_prog_fd = fd; in iterators_bpf__dump_bpf_prog__attach()
46 iterators_bpf__attach(struct iterators_bpf *skel) in iterators_bpf__attach() argument
50 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel); in iterators_bpf__attach()
51 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel); in iterators_bpf__attach()
56 iterators_bpf__detach(struct iterators_bpf *skel) in iterators_bpf__detach() argument
[all …]
iterators.lskel-little-endian.h
24 iterators_bpf__dump_bpf_map__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_map__attach() argument
26 int prog_fd = skel->progs.dump_bpf_map.prog_fd; in iterators_bpf__dump_bpf_map__attach()
30 skel->links.dump_bpf_map_fd = fd; in iterators_bpf__dump_bpf_map__attach()
35 iterators_bpf__dump_bpf_prog__attach(struct iterators_bpf *skel) in iterators_bpf__dump_bpf_prog__attach() argument
37 int prog_fd = skel->progs.dump_bpf_prog.prog_fd; in iterators_bpf__dump_bpf_prog__attach()
41 skel->links.dump_bpf_prog_fd = fd; in iterators_bpf__dump_bpf_prog__attach()
46 iterators_bpf__attach(struct iterators_bpf *skel) in iterators_bpf__attach() argument
50 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_map__attach(skel); in iterators_bpf__attach()
51 ret = ret < 0 ? ret : iterators_bpf__dump_bpf_prog__attach(skel); in iterators_bpf__attach()
56 iterators_bpf__detach(struct iterators_bpf *skel) in iterators_bpf__detach() argument
[all …]
/openbmc/linux/drivers/hid/bpf/entrypoints/
entrypoints.lskel.h
22 entrypoints_bpf__hid_tail_call__attach(struct entrypoints_bpf *skel) in entrypoints_bpf__hid_tail_call__attach() argument
24 int prog_fd = skel->progs.hid_tail_call.prog_fd; in entrypoints_bpf__hid_tail_call__attach()
28 skel->links.hid_tail_call_fd = fd; in entrypoints_bpf__hid_tail_call__attach()
33 entrypoints_bpf__attach(struct entrypoints_bpf *skel) in entrypoints_bpf__attach() argument
37 ret = ret < 0 ? ret : entrypoints_bpf__hid_tail_call__attach(skel); in entrypoints_bpf__attach()
42 entrypoints_bpf__detach(struct entrypoints_bpf *skel) in entrypoints_bpf__detach() argument
44 skel_closenz(skel->links.hid_tail_call_fd); in entrypoints_bpf__detach()
47 entrypoints_bpf__destroy(struct entrypoints_bpf *skel) in entrypoints_bpf__destroy() argument
49 if (!skel) in entrypoints_bpf__destroy()
51 entrypoints_bpf__detach(skel); in entrypoints_bpf__destroy()
[all …]
/openbmc/linux/tools/testing/selftests/bpf/
test_cpp.cpp
18 T *skel; member in Skeleton
20 Skeleton(): skel(nullptr) { } in Skeleton()
22 ~Skeleton() { if (skel) T::destroy(skel); } in ~Skeleton()
28 if (skel) in open()
31 skel = T::open(opts); in open()
32 err = libbpf_get_error(skel); in open()
34 skel = nullptr; in open()
41 int load() { return T::load(skel); } in load()
43 int attach() { return T::attach(skel); } in attach()
45 void detach() { return T::detach(skel); } in detach()
[all …]
