// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <ftw.h>
#include <libgen.h>
#include <mntent.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>

#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/magic.h>
#include <linux/unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
#include <bpf/btf.h>

#include "main.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

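/* Print an error message. With --json, emit it as an {"error": ...} object on
 * the JSON writer; otherwise print "Error: <message>" to stderr.
 */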
void p_err(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (json_output) {
		jsonw_start_object(json_wtr);
		jsonw_name(json_wtr, "error");
		jsonw_vprintf_enquote(json_wtr, fmt, ap);
		jsonw_end_object(json_wtr);
	} else {
		fprintf(stderr, "Error: ");
		vfprintf(stderr, fmt, ap);
		fprintf(stderr, "\n");
	}
	va_end(ap);
}

void p_info(const char *fmt, ...)
{
	va_list ap;

	if (json_output)
		return;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	fprintf(stderr, "\n");
	va_end(ap);
}

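/* Return true if the file system at "path" is a BPF virtual file system
 * (bpffs), judging by its f_type magic number.
 */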
static bool is_bpffs(const char *path)
{
	struct statfs st_fs;

	if (statfs(path, &st_fs) < 0)
		return false;

	return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}

/* Probe whether the kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and programs. This was done in
 * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
 * accounting'"), in Linux 5.11.
 *
 * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
 * so by checking for the availability of a given BPF helper, and this has
 * failed on some kernels with backports in the past; see commit 6b4384ff1088
 * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
 * Instead, we can probe by lowering the process-based rlimit to 0, trying to
 * load a BPF object, and resetting the rlimit. If the load succeeds then
 * memcg-based accounting is supported.
 *
 * This would be too dangerous to do in the library, because multithreaded
 * applications might attempt to load items while the rlimit is at 0. Given
 * that bpftool is single-threaded, this is fine to do here.
 */
static bool known_to_need_rlimit(void)
{
	struct rlimit rlim_init, rlim_cur_zero = {};
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd, err;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");

	if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
		return false;

	/* Drop the soft limit to zero. Keep the hard limit at its current
	 * value, because lowering it would be a permanent operation for
	 * unprivileged users.
	 */
	rlim_cur_zero.rlim_max = rlim_init.rlim_max;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
		return false;

	/* Do not use bpf_prog_load() from libbpf here, because it calls
	 * bump_rlimit_memlock(), interfering with the current probe.
	 */
	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	err = errno;

	/* reset soft rlimit to its initial value */
	setrlimit(RLIMIT_MEMLOCK, &rlim_init);

	if (prog_fd < 0)
		return err == EPERM;

	close(prog_fd);
	return false;
}

void set_max_rlimit(void)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	if (known_to_need_rlimit())
		setrlimit(RLIMIT_MEMLOCK, &rinf);
}

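/* Mount a file system of the given type at "target", first marking the mount
 * point private (bind-mounting it onto itself if necessary). On failure,
 * write a human-readable error message into "buff".
 */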
static int
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			snprintf(buff, bufflen,
				 "mount --make-private %s failed: %s",
				 target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			snprintf(buff, bufflen,
				 "mount --bind %s %s failed: %s",
				 target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	if (mount(type, target, type, 0, "mode=0700")) {
		snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
			 type, type, target, strerror(errno));
		return -1;
	}

	return 0;
}

int mount_tracefs(const char *target)
{
	char err_str[ERR_MAX_LEN];
	int err;

	err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
	if (err) {
		err_str[ERR_MAX_LEN - 1] = '\0';
		p_err("can't mount tracefs: %s", err_str);
	}

	return err;
}

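/* Open a BPF object pinned at "path" and return its file descriptor, or a
 * negative value on error. With "quiet", suppress error messages.
 */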
int open_obj_pinned(const char *path, bool quiet)
{
	char *pname;
	int fd = -1;

	pname = strdup(path);
	if (!pname) {
		if (!quiet)
			p_err("mem alloc failed");
		goto out_ret;
	}

	fd = bpf_obj_get(pname);
	if (fd < 0) {
		if (!quiet)
			p_err("bpf obj get (%s): %s", pname,
			      errno == EACCES && !is_bpffs(dirname(pname)) ?
			      "directory not in bpf file system (bpffs)" :
			      strerror(errno));
		goto out_free;
	}

out_free:
	free(pname);
out_ret:
	return fd;
}

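/* Like open_obj_pinned(), but also check that the pinned object is of the
 * expected type (program, map or link), and fail otherwise.
 */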
int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
{
	enum bpf_obj_type type;
	int fd;

	fd = open_obj_pinned(path, false);
	if (fd < 0)
		return -1;

	type = get_fd_type(fd);
	if (type < 0) {
		close(fd);
		return type;
	}
	if (type != exp_type) {
		p_err("incorrect object type: %s", get_fd_type_name(type));
		close(fd);
		return -1;
	}

	return fd;
}

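/* Make sure "dir_name" exists and has a BPF file system mounted on it,
 * creating the directory and mounting bpffs if necessary (unless mounting
 * was blocked with the --nomount option).
 */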
int create_and_mount_bpffs_dir(const char *dir_name)
{
	char err_str[ERR_MAX_LEN];
	bool dir_exists;
	int err = 0;

	if (is_bpffs(dir_name))
		return err;

	dir_exists = access(dir_name, F_OK) == 0;

	if (!dir_exists) {
		char *temp_name;
		char *parent_name;

		temp_name = strdup(dir_name);
		if (!temp_name) {
			p_err("mem alloc failed");
			return -1;
		}

		parent_name = dirname(temp_name);

		if (is_bpffs(parent_name)) {
			/* nothing to do if already mounted */
			free(temp_name);
			return err;
		}

		if (access(parent_name, F_OK) == -1) {
			p_err("can't create dir '%s' to pin BPF object: parent dir '%s' doesn't exist",
			      dir_name, parent_name);
			free(temp_name);
			return -1;
		}

		free(temp_name);
	}

	if (block_mount) {
		p_err("no BPF file system found, not mounting it due to --nomount option");
		return -1;
	}

	if (!dir_exists) {
		err = mkdir(dir_name, S_IRWXU);
		if (err) {
			p_err("failed to create dir '%s': %s", dir_name, strerror(errno));
			return err;
		}
	}

	err = mnt_fs(dir_name, "bpf", err_str, ERR_MAX_LEN);
	if (err) {
		err_str[ERR_MAX_LEN - 1] = '\0';
		p_err("can't mount BPF file system on given dir '%s': %s",
		      dir_name, err_str);

		if (!dir_exists)
			rmdir(dir_name);
	}

	return err;
}

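/* Prepare for pinning an object at "file_name": check that the path does not
 * already exist and that its parent directory does, and mount a BPF file
 * system on that directory if it is not a bpffs already.
 */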
int mount_bpffs_for_file(const char *file_name)
{
	char err_str[ERR_MAX_LEN];
	char *temp_name;
	char *dir;
	int err = 0;

	if (access(file_name, F_OK) != -1) {
		p_err("can't pin BPF object: path '%s' already exists", file_name);
		return -1;
	}

	temp_name = strdup(file_name);
	if (!temp_name) {
		p_err("mem alloc failed");
		return -1;
	}

	dir = dirname(temp_name);

	if (is_bpffs(dir))
		/* nothing to do if already mounted */
		goto out_free;

	if (access(dir, F_OK) == -1) {
		p_err("can't pin BPF object: dir '%s' doesn't exist", dir);
		err = -1;
		goto out_free;
	}

	if (block_mount) {
		p_err("no BPF file system found, not mounting it due to --nomount option");
		err = -1;
		goto out_free;
	}

	err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
	if (err) {
		err_str[ERR_MAX_LEN - 1] = '\0';
		p_err("can't mount BPF file system to pin the object '%s': %s",
		      file_name, err_str);
	}

out_free:
	free(temp_name);
	return err;
}

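/* Pin the object behind "fd" at path "name", mounting a BPF file system on
 * the parent directory first if needed.
 */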
int do_pin_fd(int fd, const char *name)
{
	int err;

	err = mount_bpffs_for_file(name);
	if (err)
		return err;

	err = bpf_obj_pin(fd, name);
	if (err)
		p_err("can't pin the object (%s): %s", name, strerror(errno));

	return err;
}

int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
{
	int err;
	int fd;

	if (!REQ_ARGS(3))
		return -EINVAL;

	fd = get_fd(&argc, &argv);
	if (fd < 0)
		return fd;

	err = do_pin_fd(fd, *argv);

	close(fd);
	return err;
}

const char *get_fd_type_name(enum bpf_obj_type type)
{
	static const char * const names[] = {
		[BPF_OBJ_UNKNOWN] = "unknown",
		[BPF_OBJ_PROG] = "prog",
		[BPF_OBJ_MAP] = "map",
		[BPF_OBJ_LINK] = "link",
	};

	if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
		return names[BPF_OBJ_UNKNOWN];

	return names[type];
}

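/* Copy the full name of a BPF program into "name_buff". The name in
 * bpf_prog_info is truncated to BPF_OBJ_NAME_LEN - 1 characters; if the
 * buffer is large enough and the program has BTF with func_info, recover the
 * complete function name from BTF instead.
 */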
void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
			char *name_buff, size_t buff_len)
{
	const char *prog_name = prog_info->name;
	const struct btf_type *func_type;
	struct bpf_func_info finfo = {};
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct btf *prog_btf = NULL;

	if (buff_len <= BPF_OBJ_NAME_LEN ||
	    strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
		goto copy_name;

	if (!prog_info->btf_id || prog_info->nr_func_info == 0)
		goto copy_name;

	info.nr_func_info = 1;
	info.func_info_rec_size = prog_info->func_info_rec_size;
	if (info.func_info_rec_size > sizeof(finfo))
		info.func_info_rec_size = sizeof(finfo);
	info.func_info = ptr_to_u64(&finfo);

	if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
		goto copy_name;

	prog_btf = btf__load_from_kernel_by_id(info.btf_id);
	if (!prog_btf)
		goto copy_name;

	func_type = btf__type_by_id(prog_btf, finfo.type_id);
	if (!func_type || !btf_is_func(func_type))
		goto copy_name;

	prog_name = btf__name_by_offset(prog_btf, func_type->name_off);

copy_name:
	snprintf(name_buff, buff_len, "%s", prog_name);

	if (prog_btf)
		btf__free(prog_btf);
}

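/* Determine the type of BPF object (program, map or link) behind "fd" by
 * reading the /proc/self/fd/<fd> symlink.
 */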
int get_fd_type(int fd)
{
	char path[PATH_MAX];
	char buf[512];
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);

	n = readlink(path, buf, sizeof(buf));
	if (n < 0) {
		p_err("can't read link type: %s", strerror(errno));
		return -1;
	}
	if (n == sizeof(buf)) {
		p_err("can't read link type: path too long!");
		return -1;
	}
	buf[n] = '\0';

	if (strstr(buf, "bpf-map"))
		return BPF_OBJ_MAP;
	else if (strstr(buf, "bpf-prog"))
		return BPF_OBJ_PROG;
	else if (strstr(buf, "bpf-link"))
		return BPF_OBJ_LINK;

	return BPF_OBJ_UNKNOWN;
}

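/* Look up "key" in /proc/self/fdinfo/<fd> and return its value as a newly
 * allocated string (the caller must free it), or NULL if not found.
 */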
char *get_fdinfo(int fd, const char *key)
{
	char path[PATH_MAX];
	char *line = NULL;
	size_t line_n = 0;
	ssize_t n;
	FILE *fdi;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);

	fdi = fopen(path, "r");
	if (!fdi)
		return NULL;

	while ((n = getline(&line, &line_n, fdi)) > 0) {
		char *value;
		int len;

		if (!strstr(line, key))
			continue;

		fclose(fdi);

		value = strchr(line, '\t');
		if (!value || !value[1]) {
			free(line);
			return NULL;
		}
		value++;

		len = strlen(value);
		memmove(line, value, len);
		line[len - 1] = '\0';

		return line;
	}

	free(line);
	fclose(fdi);
	return NULL;
}

void print_data_json(uint8_t *data, size_t len)
{
	unsigned int i;

	jsonw_start_array(json_wtr);
	for (i = 0; i < len; i++)
		jsonw_printf(json_wtr, "%d", data[i]);
	jsonw_end_array(json_wtr);
}

void print_hex_data_json(uint8_t *data, size_t len)
{
	unsigned int i;

	jsonw_start_array(json_wtr);
	for (i = 0; i < len; i++)
		jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
	jsonw_end_array(json_wtr);
}

/* extra params for nftw cb */
static struct hashmap *build_fn_table;
static enum bpf_obj_type build_fn_type;

static int do_build_table_cb(const char *fpath, const struct stat *sb,
			     int typeflag, struct FTW *ftwbuf)
{
	struct bpf_prog_info pinned_info;
	__u32 len = sizeof(pinned_info);
	enum bpf_obj_type objtype;
	int fd, err = 0;
	char *path;

	if (typeflag != FTW_F)
		goto out_ret;

	fd = open_obj_pinned(fpath, true);
	if (fd < 0)
		goto out_ret;

	objtype = get_fd_type(fd);
	if (objtype != build_fn_type)
		goto out_close;

	memset(&pinned_info, 0, sizeof(pinned_info));
	if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
		goto out_close;

	path = strdup(fpath);
	if (!path) {
		err = -1;
		goto out_close;
	}

	err = hashmap__append(build_fn_table, pinned_info.id, path);
	if (err) {
		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
		      pinned_info.id, path, strerror(errno));
		free(path);
		goto out_close;
	}

out_close:
	close(fd);
out_ret:
	return err;
}

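/* Walk all bpffs mount points listed in /proc/mounts and fill "tab" with a
 * mapping from BPF object ID to pinned path(s) for every pinned object of
 * the given type.
 */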
int build_pinned_obj_table(struct hashmap *tab,
			   enum bpf_obj_type type)
{
	struct mntent *mntent = NULL;
	FILE *mntfile = NULL;
	int flags = FTW_PHYS;
	int nopenfd = 16;
	int err = 0;

	mntfile = setmntent("/proc/mounts", "r");
	if (!mntfile)
		return -1;

	build_fn_table = tab;
	build_fn_type = type;

	while ((mntent = getmntent(mntfile))) {
		char *path = mntent->mnt_dir;

		if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
			continue;
		err = nftw(path, do_build_table_cb, nopenfd, flags);
		if (err)
			break;
	}
	fclose(mntfile);
	return err;
}

void delete_pinned_obj_table(struct hashmap *map)
{
	struct hashmap_entry *entry;
	size_t bkt;

	if (!map)
		return;

	hashmap__for_each_entry(map, entry, bkt)
		free(entry->pvalue);

	hashmap__free(map);
}

unsigned int get_page_size(void)
{
	static int result;

	if (!result)
		result = getpagesize();
	return result;
}

unsigned int get_possible_cpus(void)
{
	int cpus = libbpf_num_possible_cpus();

	if (cpus < 0) {
		p_err("Can't get # of possible cpus: %s", strerror(-cpus));
		exit(-1);
	}
	return cpus;
}

static char *
ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
{
	struct stat st;
	int err;

	err = stat("/proc/self/ns/net", &st);
	if (err) {
		p_err("Can't stat /proc/self: %s", strerror(errno));
		return NULL;
	}

	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
		return NULL;

	return if_indextoname(ifindex, buf);
}

static int read_sysfs_hex_int(char *path)
{
	char vendor_id_buf[8];
	int len;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		p_err("Can't open %s: %s", path, strerror(errno));
		return -1;
	}

	len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
	close(fd);
	if (len < 0) {
		p_err("Can't read %s: %s", path, strerror(errno));
		return -1;
	}
	if (len >= (int)sizeof(vendor_id_buf)) {
		p_err("Value in %s too long", path);
		return -1;
	}

	vendor_id_buf[len] = 0;

	return strtol(vendor_id_buf, NULL, 0);
}

static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
{
	char full_path[64];

	snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
		 devname, entry_name);

	return read_sysfs_hex_int(full_path);
}

const char *
ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
{
	__maybe_unused int device_id;
	char devname[IF_NAMESIZE];
	int vendor_id;

	if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
		p_err("Can't get net device name for ifindex %d: %s", ifindex,
		      strerror(errno));
		return NULL;
	}

	vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
	if (vendor_id < 0) {
		p_err("Can't get device vendor id for %s", devname);
		return NULL;
	}

	switch (vendor_id) {
#ifdef HAVE_LIBBFD_SUPPORT
	case 0x19ee:
		device_id = read_sysfs_netdev_hex_int(devname, "device");
		if (device_id != 0x4000 &&
		    device_id != 0x6000 &&
		    device_id != 0x6003)
			p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
		*opt = "ctx4";
		return "NFP-6xxx";
#endif /* HAVE_LIBBFD_SUPPORT */
	/* No NFP support in LLVM, so we have no valid triple to return. */
	default:
		p_err("Can't get arch name for device vendor id 0x%04x",
		      vendor_id);
		return NULL;
	}
}

void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
{
	char name[IF_NAMESIZE];

	if (!ifindex)
		return;

	printf(" offloaded_to ");
	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
		printf("%s", name);
	else
		printf("ifindex %u ns_dev %llu ns_ino %llu",
		       ifindex, ns_dev, ns_inode);
}

void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
{
	char name[IF_NAMESIZE];

	if (!ifindex)
		return;

	jsonw_name(json_wtr, "dev");
	jsonw_start_object(json_wtr);
	jsonw_uint_field(json_wtr, "ifindex", ifindex);
	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
		jsonw_string_field(json_wtr, "ifname", name);
	jsonw_end_object(json_wtr);
}

int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
{
	char *endptr;

	NEXT_ARGP();

	if (*val) {
		p_err("%s already specified", what);
		return -1;
	}

	*val = strtoul(**argv, &endptr, 0);
	if (*endptr) {
		p_err("can't parse %s as %s", **argv, what);
		return -1;
	}
	NEXT_ARGP();

	return 0;
}

int __printf(2, 0)
print_all_levels(__maybe_unused enum libbpf_print_level level,
		 const char *format, va_list args)
{
	return vfprintf(stderr, format, args);
}

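/* Collect file descriptors for all loaded programs whose name (tag == false)
 * or tag (tag == true) matches "nametag", growing the *fds array as needed.
 * Return the number of matching programs, or -1 on error.
 */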
static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
{
	char prog_name[MAX_PROG_FULL_NAME];
	unsigned int id = 0;
	int fd, nb_fds = 0;
	void *tmp;
	int err;

	while (true) {
		struct bpf_prog_info info = {};
		__u32 len = sizeof(info);

		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno != ENOENT) {
				p_err("%s", strerror(errno));
				goto err_close_fds;
			}
			return nb_fds;
		}

		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			p_err("can't get prog by id (%u): %s",
			      id, strerror(errno));
			goto err_close_fds;
		}

		err = bpf_prog_get_info_by_fd(fd, &info, &len);
		if (err) {
			p_err("can't get prog info (%u): %s",
			      id, strerror(errno));
			goto err_close_fd;
		}

		if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
			close(fd);
			continue;
		}

		if (!tag) {
			get_prog_full_name(&info, fd, prog_name,
					   sizeof(prog_name));
			if (strncmp(nametag, prog_name, sizeof(prog_name))) {
				close(fd);
				continue;
			}
		}

		if (nb_fds > 0) {
			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
			if (!tmp) {
				p_err("failed to realloc");
				goto err_close_fd;
			}
			*fds = tmp;
		}
		(*fds)[nb_fds++] = fd;
	}

err_close_fd:
	close(fd);
err_close_fds:
	while (--nb_fds >= 0)
		close((*fds)[nb_fds]);
	return -1;
}

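/* Parse a program handle from the command line ("id", "tag", "name" or
 * "pinned" followed by a value) and fill *fds with the matching program file
 * descriptors. Return the number of fds, or -1 on error.
 */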
int prog_parse_fds(int *argc, char ***argv, int **fds)
{
	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;

		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		(*fds)[0] = bpf_prog_get_fd_by_id(id);
		if ((*fds)[0] < 0) {
			p_err("get by id (%u): %s", id, strerror(errno));
			return -1;
		}
		return 1;
	} else if (is_prefix(**argv, "tag")) {
		unsigned char tag[BPF_TAG_SIZE];

		NEXT_ARGP();

		if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
			   tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
		    != BPF_TAG_SIZE) {
			p_err("can't parse tag");
			return -1;
		}
		NEXT_ARGP();

		return prog_fd_by_nametag(tag, fds, true);
	} else if (is_prefix(**argv, "name")) {
		char *name;

		NEXT_ARGP();

		name = **argv;
		if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
			p_err("can't parse name");
			return -1;
		}
		NEXT_ARGP();

		return prog_fd_by_nametag(name, fds, false);
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
		if ((*fds)[0] < 0)
			return -1;
		return 1;
	}

	p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
	return -1;
}

int prog_parse_fd(int *argc, char ***argv)
{
	int *fds = NULL;
	int nb_fds, fd;

	fds = malloc(sizeof(int));
	if (!fds) {
		p_err("mem alloc failed");
		return -1;
	}
	nb_fds = prog_parse_fds(argc, argv, &fds);
	if (nb_fds != 1) {
		if (nb_fds > 1) {
			p_err("several programs match this handle");
			while (nb_fds--)
				close(fds[nb_fds]);
		}
		fd = -1;
		goto exit_free;
	}

	fd = fds[0];
exit_free:
	free(fds);
	return fd;
}

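/* Map counterpart of prog_fd_by_nametag(): collect file descriptors for all
 * maps whose name matches "name". Return the number of matching maps, or -1
 * on error.
 */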
static int map_fd_by_name(char *name, int **fds)
{
	unsigned int id = 0;
	int fd, nb_fds = 0;
	void *tmp;
	int err;

	while (true) {
		struct bpf_map_info info = {};
		__u32 len = sizeof(info);

		err = bpf_map_get_next_id(id, &id);
		if (err) {
			if (errno != ENOENT) {
				p_err("%s", strerror(errno));
				goto err_close_fds;
			}
			return nb_fds;
		}

		fd = bpf_map_get_fd_by_id(id);
		if (fd < 0) {
			p_err("can't get map by id (%u): %s",
			      id, strerror(errno));
			goto err_close_fds;
		}

		err = bpf_map_get_info_by_fd(fd, &info, &len);
		if (err) {
			p_err("can't get map info (%u): %s",
			      id, strerror(errno));
			goto err_close_fd;
		}

		if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
			close(fd);
			continue;
		}

		if (nb_fds > 0) {
			tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
			if (!tmp) {
				p_err("failed to realloc");
				goto err_close_fd;
			}
			*fds = tmp;
		}
		(*fds)[nb_fds++] = fd;
	}

err_close_fd:
	close(fd);
err_close_fds:
	while (--nb_fds >= 0)
		close((*fds)[nb_fds]);
	return -1;
}

int map_parse_fds(int *argc, char ***argv, int **fds)
{
	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;

		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		(*fds)[0] = bpf_map_get_fd_by_id(id);
		if ((*fds)[0] < 0) {
			p_err("get map by id (%u): %s", id, strerror(errno));
			return -1;
		}
		return 1;
	} else if (is_prefix(**argv, "name")) {
		char *name;

		NEXT_ARGP();

		name = **argv;
		if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
			p_err("can't parse name");
			return -1;
		}
		NEXT_ARGP();

		return map_fd_by_name(name, fds);
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
		if ((*fds)[0] < 0)
			return -1;
		return 1;
	}

	p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
	return -1;
}

int map_parse_fd(int *argc, char ***argv)
{
	int *fds = NULL;
	int nb_fds, fd;

	fds = malloc(sizeof(int));
	if (!fds) {
		p_err("mem alloc failed");
		return -1;
	}
	nb_fds = map_parse_fds(argc, argv, &fds);
	if (nb_fds != 1) {
		if (nb_fds > 1) {
			p_err("several maps match this handle");
			while (nb_fds--)
				close(fds[nb_fds]);
		}
		fd = -1;
		goto exit_free;
	}

	fd = fds[0];
exit_free:
	free(fds);
	return fd;
}

int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
			  __u32 *info_len)
{
	int err;
	int fd;

	fd = map_parse_fd(argc, argv);
	if (fd < 0)
		return -1;

	err = bpf_map_get_info_by_fd(fd, info, info_len);
	if (err) {
		p_err("can't get map info: %s", strerror(errno));
		close(fd);
		return err;
	}

	return fd;
}

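/* Trivial hash and equality callbacks for hashmaps that use BPF object IDs
 * directly as keys.
 */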
size_t hash_fn_for_key_as_id(long key, void *ctx)
{
	return key;
}

bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

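/* Return the attach type string as accepted on the bpftool command line,
 * falling back to libbpf's attach type name when bpftool has no alias of its
 * own.
 */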
const char *bpf_attach_type_input_str(enum bpf_attach_type t)
{
	switch (t) {
	case BPF_CGROUP_INET_INGRESS: return "ingress";
	case BPF_CGROUP_INET_EGRESS: return "egress";
	case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
	case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
	case BPF_CGROUP_SOCK_OPS: return "sock_ops";
	case BPF_CGROUP_DEVICE: return "device";
	case BPF_CGROUP_INET4_BIND: return "bind4";
	case BPF_CGROUP_INET6_BIND: return "bind6";
	case BPF_CGROUP_INET4_CONNECT: return "connect4";
	case BPF_CGROUP_INET6_CONNECT: return "connect6";
	case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
	case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
	case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
	case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
	case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
	case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
	case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
	case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
	case BPF_CGROUP_SYSCTL: return "sysctl";
	case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
	case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
	case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
	case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
	case BPF_TRACE_RAW_TP: return "raw_tp";
	case BPF_TRACE_FENTRY: return "fentry";
	case BPF_TRACE_FEXIT: return "fexit";
	case BPF_MODIFY_RETURN: return "mod_ret";
	case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
	case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
	default: return libbpf_bpf_attach_type_str(t);
	}
}

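/* Concatenate "path" and "name" into buf as "<path>/<name>", e.g.
 * pathname_concat(buf, sizeof(buf), "/sys/fs/bpf", "my_prog") builds
 * "/sys/fs/bpf/my_prog". Return 0 on success, -EINVAL on output error or
 * -ENAMETOOLONG if the result would not fit in buf_sz bytes.
 */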
int pathname_concat(char *buf, int buf_sz, const char *path,
		    const char *name)
{
	int len;

	len = snprintf(buf, buf_sz, "%s/%s", path, name);
	if (len < 0)
		return -EINVAL;
	if (len >= buf_sz)
		return -ENAMETOOLONG;

	return 0;
}