// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

/* On x86_64, syscall entry points carry the arch-specific __x64_ prefix. */
#ifdef __x86_64__
#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
#else
#define SYS_KPROBE_NAME "sys_nanosleep"
#endif

/*
 * Per-sample callback: each sample carries the CPU id the BPF program saw;
 * verify it matches the CPU the sample was delivered on, and record it in
 * the cpu_seen mask passed in through ctx.
 */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

	/* only invoke CHECK() on mismatch, to avoid a PASS line per sample;
	 * duration above exists solely because the CHECK() macro uses it
	 */
	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}

void test_perf_buffer(void)
{
	int err, prog_fd, nr_cpus, i, duration = 0;
	const char *prog_name = "kprobe/sys_nanosleep";
	const char *file = "./test_perf_buffer.o";
	struct perf_buffer_opts pb_opts = {};
	struct bpf_map *perf_buf_map;
	cpu_set_t cpu_set, cpu_seen;
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct perf_buffer *pb;
	struct bpf_link *link;

	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

	/* load program */
	err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto out_close;

	/* find the perf event array map the BPF program outputs into */
	perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
	if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
		goto out_close;

	/* attach kprobe */
	link = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  SYS_KPROBE_NAME);
	if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
		goto out_close;

	/* set up perf buffer: 1 page per CPU ring, samples go to on_sample */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &cpu_seen;
	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto out_detach;

	/* trigger kprobe on every CPU by pinning ourselves to each one in
	 * turn; usleep() enters the kernel through the nanosleep syscall
	 */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		CPU_ZERO(&cpu_set);
		CPU_SET(i, &cpu_set);

		err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
					     &cpu_set);
		if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
				 i, err))
			goto out_free_pb;

		usleep(1);
	}

	/* read perf buffer with a 100ms timeout */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	/* every possible CPU should have contributed exactly one sample */
	if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

out_free_pb:
	perf_buffer__free(pb);
out_detach:
	bpf_link__destroy(link);
out_close:
	bpf_object__close(obj);
}
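
/*
 * For context, a minimal sketch of the BPF side this test loads from
 * test_perf_buffer.o. This is a reconstruction under the assumptions the
 * test encodes (a PERF_EVENT_ARRAY map named "perf_buf_map", a program
 * titled "kprobe/sys_nanosleep", each sample being the producing CPU's id),
 * not the verbatim source; the handler name handle_sys_nanosleep_entry is
 * illustrative, since only the SEC() title matters to the loader:
 *
 *	#include <linux/ptrace.h>
 *	#include <linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} perf_buf_map SEC(".maps");
 *
 *	SEC("kprobe/sys_nanosleep")
 *	int handle_sys_nanosleep_entry(struct pt_regs *ctx)
 *	{
 *		int cpu = bpf_get_smp_processor_id();
 *
 *		bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
 *				      &cpu, sizeof(cpu));
 *		return 0;
 *	}
 */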