// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>

#include "atomics.lskel.h"

static void test_add(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__add__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(add)"))
		return;

	prog_fd = skel->progs.add.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run add",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
	ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");

	ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
	ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");

	ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
	ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");

	ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");

cleanup:
	close(link_fd);
}

static void test_sub(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__sub__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
		return;

	prog_fd = skel->progs.sub.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run sub",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
	ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");

	ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
	ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");

	ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
	ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");

	ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");

cleanup:
	close(link_fd);
}

static void test_and(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__and__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(and)"))
		return;

	prog_fd = skel->progs.and.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run and",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
	ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");

	ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
	ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");

	ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");

cleanup:
	close(link_fd);
}

static void test_or(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__or__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(or)"))
		return;

	prog_fd = skel->progs.or.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run or",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
	ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");

	ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
	ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");

	ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");

cleanup:
	close(link_fd);
}

static void test_xor(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__xor__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
		return;

	prog_fd = skel->progs.xor.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run xor",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
	ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");

	ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
	ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");

	ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");

cleanup:
	close(link_fd);
}

static void test_cmpxchg(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__cmpxchg__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
		return;

	prog_fd = skel->progs.cmpxchg.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run cmpxchg",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
	ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg64_result_fail");
	ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg64_result_succeed");

	ASSERT_EQ(skel->data->cmpxchg32_value, 2, "cmpxchg32_value");
	ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg32_result_fail");
	ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg32_result_succeed");

cleanup:
	close(link_fd);
}

static void test_xchg(struct atomics_lskel *skel)
{
	int err, prog_fd;
	__u32 duration = 0, retval;
	int link_fd;

	link_fd = atomics_lskel__xchg__attach(skel);
	if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
		return;

	prog_fd = skel->progs.xchg.prog_fd;
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	if (CHECK(err || retval, "test_run xchg",
		  "err %d errno %d retval %d duration %d\n",
		  err, errno, retval, duration))
		goto cleanup;

	ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
	ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");

	ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
	ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");

cleanup:
	close(link_fd);
}

void test_atomics(void)
{
	struct atomics_lskel *skel;
	__u32 duration = 0;

	skel = atomics_lskel__open_and_load();
	if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
		return;

	if (skel->data->skip_tests) {
		printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)\n",
		       __func__);
		test__skip();
		goto cleanup;
	}
	/* Let the BPF programs filter on our pid */
	skel->bss->pid = getpid();

	if (test__start_subtest("add"))
		test_add(skel);
	if (test__start_subtest("sub"))
		test_sub(skel);
	if (test__start_subtest("and"))
		test_and(skel);
	if (test__start_subtest("or"))
		test_or(skel);
	if (test__start_subtest("xor"))
		test_xor(skel);
	if (test__start_subtest("cmpxchg"))
		test_cmpxchg(skel);
	if (test__start_subtest("xchg"))
		test_xchg(skel);

cleanup:
	atomics_lskel__destroy(skel);
}