// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */

#include <string.h>
#include <linux/bpf.h>
#include <linux/limits.h>
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"

/* Tracepoint used by the tracepoint-link subtest. */
#define TP_CAT "sched"
#define TP_NAME "sched_switch"

/* Symbols attached by the kprobe-multi subtests; deliberately unsorted,
 * they are qsort()ed before attach so the kernel-reported address array
 * can be compared index-by-index against kmulti_addrs[].
 */
static const char *kmulti_syms[] = {
	"bpf_fentry_test2",
	"bpf_fentry_test1",
	"bpf_fentry_test3",
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];

#define KPROBE_FUNC "bpf_fentry_test1"
/* Resolved from kallsyms in test_fill_link_info(); 0 if resolution failed. */
static __u64 kprobe_addr;

#define UPROBE_FILE "/proc/self/exe"
static ssize_t uprobe_offset;
/* uprobe attach point */
static noinline void uprobe_func(void)
{
	asm volatile ("");
}

/* Cookie passed at attach time and expected back in bpf_link_info. */
#define PERF_EVENT_COOKIE 0xdeadbeef

/*
 * Fetch bpf_link_info for a perf-event-backed link and verify the fields
 * for the given event @type.
 *
 * Name strings use a two-pass protocol: the first bpf_link_get_info_by_fd()
 * call is made with a NULL name pointer so the kernel only reports name_len;
 * the function then points the info struct at @buf and jumps to "again" to
 * re-fetch and fill in the actual string.
 *
 * @fd:           link fd to query
 * @type:         expected info.perf_event.type
 * @addr:         expected probe address (0 skips the address check)
 * @offset:       expected kprobe/uprobe offset
 * @entry_offset: arch-dependent delta added to @addr (see IBT note below)
 *
 * Returns 0 on success, -1 on a fatal mismatch.
 */
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
				 ssize_t offset, ssize_t entry_offset)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char buf[PATH_MAX];
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
		return -1;
	if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
		return -1;

	switch (info.perf_event.type) {
	case BPF_PERF_EVENT_KPROBE:
	case BPF_PERF_EVENT_KRETPROBE:
		ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

		/* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */
		if (addr)
			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
				  "kprobe_addr");

		ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");

		/* name_len includes the terminating NUL */
		ASSERT_EQ(info.perf_event.kprobe.name_len, strlen(KPROBE_FUNC) + 1,
			  "name_len");
		if (!info.perf_event.kprobe.func_name) {
			/* second pass: supply a user buffer for the name */
			info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
			info.perf_event.kprobe.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
			      strlen(KPROBE_FUNC));
		ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
		break;
	case BPF_PERF_EVENT_TRACEPOINT:
		ASSERT_EQ(info.perf_event.tracepoint.name_len, strlen(TP_NAME) + 1,
			  "name_len");
		if (!info.perf_event.tracepoint.tp_name) {
			/* second pass: supply a user buffer for the name */
			info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
			info.perf_event.tracepoint.name_len = sizeof(buf);
			goto again;
		}

		ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie");

		err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
			      strlen(TP_NAME));
		ASSERT_EQ(err, 0, "cmp_tp_name");
		break;
	case BPF_PERF_EVENT_UPROBE:
	case BPF_PERF_EVENT_URETPROBE:
		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

		ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1,
			  "name_len");
		if (!info.perf_event.uprobe.file_name) {
			/* second pass: supply a user buffer for the path */
			info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
			info.perf_event.uprobe.name_len = sizeof(buf);
			goto again;
		}

		ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie");

		err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
			      strlen(UPROBE_FILE));
		ASSERT_EQ(err, 0, "cmp_file_name");
		break;
	default:
		err = -1;
		break;
	}
	return err;
}

/*
 * Negative test: feed bpf_link_get_info_by_fd() malformed user buffers for
 * a kprobe link and check the exact errno returned for each combination of
 * bogus pointer / length. Also verifies the kernel left the output fields
 * untouched after the failed calls.
 */
static void kprobe_fill_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));

	/* pointer set but name_len still 0 */
	info.perf_event.kprobe.func_name = 0x1; /* invalid address */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");

	/* valid length, but the pointer is unmapped */
	info.perf_event.kprobe.name_len = 64;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");

	/* NULL pointer with non-zero name_len */
	info.perf_event.kprobe.func_name = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_len");

	ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
	ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
	ASSERT_EQ(info.perf_event.type, 0, "type");
}

/*
 * Attach a (ret)kprobe to KPROBE_FUNC via a perf link and either verify the
 * reported link info (@invalid == false) or run the malformed-buffer
 * negative tests (@invalid == true).
 */
static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type,
				       bool invalid)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
		.bpf_cookie = PERF_EVENT_COOKIE,
	);
	ssize_t entry_offset = 0;
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts);
	if (!ASSERT_OK_PTR(link, "attach_kprobe"))
		return;

	link_fd = bpf_link__fd(link);
	if (!invalid) {
		/* See also arch_adjust_kprobe_addr(). */
		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
			entry_offset = 4;
		err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
		ASSERT_OK(err, "verify_perf_link_info");
	} else {
		kprobe_fill_invalid_user_buffer(link_fd);
	}
	bpf_link__destroy(link);
}

/* Attach to TP_CAT:TP_NAME and verify the tracepoint link info fields. */
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts,
		.bpf_cookie = PERF_EVENT_COOKIE,
	);
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts);
	if (!ASSERT_OK_PTR(link, "attach_tp"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}

/*
 * Attach a (ret)uprobe to uprobe_func in our own binary (UPROBE_FILE) and
 * verify the reported link info; addr is passed as 0 since only the file
 * name and offset are checked for uprobes.
 */
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.retprobe = type == BPF_PERF_EVENT_URETPROBE,
		.bpf_cookie = PERF_EVENT_COOKIE,
	);
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run,
					       0, /* self pid */
					       UPROBE_FILE, uprobe_offset,
					       &opts);
	if (!ASSERT_OK_PTR(link, "attach_uprobe"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}

/*
 * Verify bpf_link_info for a kprobe-multi link: count, the RETURN flag
 * matching @retprobe, and (via the same two-pass protocol as above: first
 * call with a NULL addrs pointer, then re-fetch into addrs[]) that the
 * reported addresses equal the kallsyms-resolved kmulti_addrs[].
 *
 * Returns 0 on success, -1 on a fatal mismatch.
 */
static int verify_kmulti_link_info(int fd, bool retprobe)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 addrs[KMULTI_CNT];
	int flags, i, err;

	memset(&info, 0, sizeof(info));

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
		return -1;

	ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
	flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
	if (!retprobe)
		ASSERT_EQ(flags, 0, "kmulti_flags");
	else
		ASSERT_NEQ(flags, 0, "kretmulti_flags");

	if (!info.kprobe_multi.addrs) {
		/* second pass: supply a user buffer for the addresses */
		info.kprobe_multi.addrs = ptr_to_u64(addrs);
		goto again;
	}
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	return 0;
}

/*
 * Negative tests for kprobe-multi link info: bad count/addrs combinations
 * must fail with the expected errno. The "smaller_cnt" case additionally
 * checks -ENOSPC still copies out the first count-1 addresses and leaves
 * the rest of the caller's buffer untouched; "bigger_cnt" succeeds and
 * fills exactly KMULTI_CNT entries.
 */
static void verify_kmulti_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 addrs[KMULTI_CNT];
	int err, i;

	memset(&info, 0, sizeof(info));

	/* count set but no buffer */
	info.kprobe_multi.count = KMULTI_CNT;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_addr");

	/* buffer set but zero count */
	info.kprobe_multi.addrs = ptr_to_u64(addrs);
	info.kprobe_multi.count = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_cnt");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT - 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
	for (i = 0; i < KMULTI_CNT - 1; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	/* i == KMULTI_CNT - 1 here: last slot must be untouched */
	ASSERT_EQ(addrs[i], 0, "kmulti_addrs");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT + 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, 0, "bigger_cnt");
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");

	/* valid count but unmapped buffer pointer */
	info.kprobe_multi.count = KMULTI_CNT;
	info.kprobe_multi.addrs = 0x1; /* invalid addr */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");
}

/* qsort() comparator for an array of C-string pointers. */
static int symbols_cmp_r(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

/*
 * Attach a kprobe-multi (or kretprobe-multi if @retprobe) link over
 * kmulti_syms[] and either verify the reported info (@invalid == false)
 * or run the malformed-buffer negative tests (@invalid == true).
 */
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool invalid)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct bpf_link *link;
	int link_fd, err;

	opts.syms = kmulti_syms;
	opts.cnt = KMULTI_CNT;
	opts.retprobe = retprobe;
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
	if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
		return;

	link_fd = bpf_link__fd(link);
	if (!invalid) {
		err = verify_kmulti_link_info(link_fd, retprobe);
		ASSERT_OK(err, "verify_kmulti_link_info");
	} else {
		verify_kmulti_invalid_user_buffer(link_fd);
	}
	bpf_link__destroy(link);
}

/*
 * Test entry point: load the skeleton, resolve expected addresses from
 * kallsyms, then run each subtest. kmulti_syms[] is sorted before address
 * resolution so the comparison order matches what the kernel reports.
 */
void test_fill_link_info(void)
{
	struct test_fill_link_info *skel;
	int i;

	skel = test_fill_link_info__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* load kallsyms to compare the addr */
	if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
		goto cleanup;

	kprobe_addr = ksym_get_addr(KPROBE_FUNC);
	if (test__start_subtest("kprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false);
	if (test__start_subtest("kretprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false);
	if (test__start_subtest("kprobe_invalid_ubuff"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
	if (test__start_subtest("tracepoint_link_info"))
		test_tp_fill_link_info(skel);

	uprobe_offset = get_uprobe_offset(&uprobe_func);
	if (test__start_subtest("uprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE);
	if (test__start_subtest("uretprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE);

	qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
	for (i = 0; i < KMULTI_CNT; i++)
		kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
	if (test__start_subtest("kprobe_multi_link_info"))
		test_kprobe_multi_fill_link_info(skel, false, false);
	if (test__start_subtest("kretprobe_multi_link_info"))
		test_kprobe_multi_fill_link_info(skel, true, false);
	if (test__start_subtest("kprobe_multi_invalid_ubuff"))
		test_kprobe_multi_fill_link_info(skel, true, true);

cleanup:
	test_fill_link_info__destroy(skel);
}