// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "bpf/libbpf_internal.h"

/* Per-sample callback: check that the payload (the CPU id emitted by the
 * BPF program) matches the CPU the sample was received on, and mark that
 * CPU as seen.
 *
 * AddressSanitizer sometimes crashes on the data dereference below because
 * this is mmap()'ed memory. Disable instrumentation with the
 * no_sanitize_address attribute.
 */
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* 'duration' is referenced by the CHECK() macro */
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}

void test_perf_buffer(void)
{
	int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
	const char *prog_name = "kprobe/sys_nanosleep";
	const char *file = "./test_perf_buffer.o";
	struct perf_buffer_opts pb_opts = {};
	struct bpf_map *perf_buf_map;
	cpu_set_t cpu_set, cpu_seen;
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct perf_buffer *pb;
	struct bpf_link *link;
	bool *online;

	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
				  &online, &on_len);
	if (CHECK(err, "nr_on_cpus", "err %d\n", err))
		return;

	for (i = 0; i < on_len; i++)
		if (online[i])
			nr_on_cpus++;

	/* load program */
	err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
		obj = NULL;
		goto out_close;
	}

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto out_close;

	/* load map */
	perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
	if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
		goto out_close;

	/* attach kprobe */
	link = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  SYS_NANOSLEEP_KPROBE_NAME);
	if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
		goto out_close;

	/* set up perf buffer: one page per CPU ring, samples delivered to
	 * on_sample()
	 */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &cpu_seen;
	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto out_detach;

	/* trigger kprobe on every online CPU */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i]) {
			printf("skipping offline CPU #%d\n", i);
			continue;
		}

		CPU_ZERO(&cpu_set);
		CPU_SET(i, &cpu_set);

		err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
					     &cpu_set);
		if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
				 i, err))
			goto out_detach;

		/* usleep() goes through nanosleep, firing the kprobe on the
		 * CPU this thread is now pinned to
		 */
		usleep(1);
	}

	/* read perf buffer */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

out_free_pb:
	perf_buffer__free(pb);
out_detach:
	bpf_link__destroy(link);
out_close:
	bpf_object__close(obj);
	free(online);
}
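
/* Not part of this test: for context, the "./test_perf_buffer.o" object
 * loaded above is expected to define a BPF_MAP_TYPE_PERF_EVENT_ARRAY map
 * named "perf_buf_map" and a "kprobe/sys_nanosleep" program that emits the
 * current CPU id, since on_sample() compares the payload against the CPU
 * the sample arrived on. A rough sketch of such a program is below; the
 * include paths, map definition style, and function name are assumptions,
 * only the map and section names come from this test.
 *
 *	#include <linux/ptrace.h>
 *	#include <linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} perf_buf_map SEC(".maps");
 *
 *	SEC("kprobe/sys_nanosleep")
 *	int handle_sys_nanosleep_entry(struct pt_regs *ctx)
 *	{
 *		int cpu = bpf_get_smp_processor_id();
 *
 *		bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
 *				      &cpu, sizeof(cpu));
 *		return 0;
 *	}
 */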