// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <test_progs.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"

/* uprobe attach point */
static void trigger_func(void)
{
	asm volatile ("");
}

static void kprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;

	/* attach two kprobes */
	opts.bpf_cookie = 0x1;
	opts.retprobe = false;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x2;
	opts.retprobe = false;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two kretprobes */
	opts.bpf_cookie = 0x10;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x20;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger kprobe && kretprobe */
	usleep(1);

	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void kprobe_multi_test_run(struct kprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");

	ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
	ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
	ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
	ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
	ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
	ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
	ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
	ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}

static void kprobe_multi_link_api_subtest(void)
{
	int prog_fd, link1_fd = -1, link2_fd = -1;
	struct kprobe_multi *skel = NULL;
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	unsigned long long addrs[8];
	__u64 cookies[8];

	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		goto cleanup;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

#define GET_ADDR(__sym, __addr) ({					\
	__addr = ksym_get_addr(__sym);					\
	if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym))		\
		goto cleanup;						\
})

	GET_ADDR("bpf_fentry_test1", addrs[0]);
	GET_ADDR("bpf_fentry_test2", addrs[1]);
	GET_ADDR("bpf_fentry_test3", addrs[2]);
	GET_ADDR("bpf_fentry_test4", addrs[3]);
	GET_ADDR("bpf_fentry_test5", addrs[4]);
	GET_ADDR("bpf_fentry_test6", addrs[5]);
	GET_ADDR("bpf_fentry_test7", addrs[6]);
	GET_ADDR("bpf_fentry_test8", addrs[7]);

#undef GET_ADDR

	/* cookies are paired with addrs by array index */
	cookies[0] = 1;
	cookies[1] = 2;
	cookies[2] = 3;
	cookies[3] = 4;
	cookies[4] = 5;
	cookies[5] = 6;
	cookies[6] = 7;
	cookies[7] = 8;

	opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
	opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
	opts.kprobe_multi.cookies = (const __u64 *) &cookies;
	prog_fd = bpf_program__fd(skel->progs.test_kprobe);

	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
	if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
		goto cleanup;

	/* reversed cookie values distinguish the return probe link */
	cookies[0] = 8;
	cookies[1] = 7;
	cookies[2] = 6;
	cookies[3] = 5;
	cookies[4] = 4;
	cookies[5] = 3;
	cookies[6] = 2;
	cookies[7] = 1;

	opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.test_kretprobe);

	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
	if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
		goto cleanup;

	kprobe_multi_test_run(skel);

cleanup:
	close(link1_fd);
	close(link2_fd);
	kprobe_multi__destroy(skel);
}

static void kprobe_multi_attach_api_subtest(void)
{
	struct bpf_link *link1 = NULL, *link2 = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct kprobe_multi *skel = NULL;
	const char *syms[8] = {
		"bpf_fentry_test1",
		"bpf_fentry_test2",
		"bpf_fentry_test3",
		"bpf_fentry_test4",
		"bpf_fentry_test5",
		"bpf_fentry_test6",
		"bpf_fentry_test7",
		"bpf_fentry_test8",
	};
	__u64 cookies[8];

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->test_cookie = true;

	/* cookies are paired with syms by array index */
	cookies[0] = 1;
	cookies[1] = 2;
	cookies[2] = 3;
	cookies[3] = 4;
	cookies[4] = 5;
	cookies[5] = 6;
	cookies[6] = 7;
	cookies[7] = 8;

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = cookies;

	link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
						      NULL, &opts);
	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	/* reversed cookie values distinguish the kretprobe attachment */
	cookies[0] = 8;
	cookies[1] = 7;
	cookies[2] = 6;
	cookies[3] = 5;
	cookies[4] = 4;
	cookies[5] = 3;
	cookies[6] = 2;
	cookies[7] = 1;

	opts.retprobe = true;

	link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
						      NULL, &opts);
	if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	kprobe_multi_test_run(skel);

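	/* bpf_link__destroy() is a no-op on NULL or error pointers, so the
	 * cleanup path is safe to reach from any early exit above.
	 */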
cleanup:
	bpf_link__destroy(link2);
	bpf_link__destroy(link1);
	kprobe_multi__destroy(skel);
}

static void uprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
	ssize_t uprobe_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* attach two uprobes */
	opts.bpf_cookie = 0x100;
	opts.retprobe = false;
	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x200;
	opts.retprobe = false;
	link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two uretprobes */
	opts.bpf_cookie = 0x1000;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x2000;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger uprobe && uretprobe */
	trigger_func();

	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void tp_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;

	/* attach first tp prog */
	opts.bpf_cookie = 0x10000;
	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	/* attach second tp prog */
	opts.bpf_cookie = 0x20000;
	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");

	/* now we detach first prog and will attach third one, which causes
	 * two internal calls to bpf_prog_array_copy(), shuffling
	 * bpf_prog_array_items around. We test here that we don't lose track
	 * of associated bpf_cookies.
321 */ 322 bpf_link__destroy(link1); 323 link1 = NULL; 324 kern_sync_rcu(); 325 skel->bss->tp_res = 0; 326 327 /* attach third tp prog */ 328 opts.bpf_cookie = 0x40000; 329 link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3, 330 "syscalls", "sys_enter_nanosleep", &opts); 331 if (!ASSERT_OK_PTR(link3, "link3")) 332 goto cleanup; 333 334 /* trigger tracepoints */ 335 usleep(1); 336 337 ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2"); 338 339 cleanup: 340 bpf_link__destroy(link1); 341 bpf_link__destroy(link2); 342 bpf_link__destroy(link3); 343 } 344 345 static void burn_cpu(void) 346 { 347 volatile int j = 0; 348 cpu_set_t cpu_set; 349 int i, err; 350 351 /* generate some branches on cpu 0 */ 352 CPU_ZERO(&cpu_set); 353 CPU_SET(0, &cpu_set); 354 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); 355 ASSERT_OK(err, "set_thread_affinity"); 356 357 /* spin the loop for a while (random high number) */ 358 for (i = 0; i < 1000000; ++i) 359 ++j; 360 } 361 362 static void pe_subtest(struct test_bpf_cookie *skel) 363 { 364 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts); 365 struct bpf_link *link = NULL; 366 struct perf_event_attr attr; 367 int pfd = -1; 368 369 /* create perf event */ 370 memset(&attr, 0, sizeof(attr)); 371 attr.size = sizeof(attr); 372 attr.type = PERF_TYPE_SOFTWARE; 373 attr.config = PERF_COUNT_SW_CPU_CLOCK; 374 attr.freq = 1; 375 attr.sample_freq = 1000; 376 pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); 377 if (!ASSERT_GE(pfd, 0, "perf_fd")) 378 goto cleanup; 379 380 opts.bpf_cookie = 0x100000; 381 link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts); 382 if (!ASSERT_OK_PTR(link, "link1")) 383 goto cleanup; 384 385 burn_cpu(); /* trigger BPF prog */ 386 387 ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1"); 388 389 /* prevent bpf_link__destroy() closing pfd itself */ 390 bpf_link__disconnect(link); 391 /* close BPF link's FD explicitly */ 392 close(bpf_link__fd(link)); 393 /* free up memory used by struct bpf_link */ 394 bpf_link__destroy(link); 395 link = NULL; 396 kern_sync_rcu(); 397 skel->bss->pe_res = 0; 398 399 opts.bpf_cookie = 0x200000; 400 link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts); 401 if (!ASSERT_OK_PTR(link, "link2")) 402 goto cleanup; 403 404 burn_cpu(); /* trigger BPF prog */ 405 406 ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2"); 407 408 cleanup: 409 close(pfd); 410 bpf_link__destroy(link); 411 } 412 413 void test_bpf_cookie(void) 414 { 415 struct test_bpf_cookie *skel; 416 417 skel = test_bpf_cookie__open_and_load(); 418 if (!ASSERT_OK_PTR(skel, "skel_open")) 419 return; 420 421 skel->bss->my_tid = syscall(SYS_gettid); 422 423 if (test__start_subtest("kprobe")) 424 kprobe_subtest(skel); 425 if (test__start_subtest("multi_kprobe_link_api")) 426 kprobe_multi_link_api_subtest(); 427 if (test__start_subtest("multi_kprobe_attach_api")) 428 kprobe_multi_attach_api_subtest(); 429 if (test__start_subtest("uprobe")) 430 uprobe_subtest(skel); 431 if (test__start_subtest("tracepoint")) 432 tp_subtest(skel); 433 if (test__start_subtest("perf_event")) 434 pe_subtest(skel); 435 436 test_bpf_cookie__destroy(skel); 437 } 438