// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "zip.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);

static const char * const attach_type_name[] = {
	[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
	[BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
	[BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
	[BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
	[BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
	[BPF_CGROUP_DEVICE] = "cgroup_device",
	[BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
	[BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
	[BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
	[BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
	[BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
	[BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
	[BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
	[BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
	[BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
	[BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
	[BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
	[BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
	[BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
	[BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
	[BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
	[BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
	[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
	[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
	[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
	[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
"lirc_mode2", 103 [BPF_FLOW_DISSECTOR] = "flow_dissector", 104 [BPF_TRACE_RAW_TP] = "trace_raw_tp", 105 [BPF_TRACE_FENTRY] = "trace_fentry", 106 [BPF_TRACE_FEXIT] = "trace_fexit", 107 [BPF_MODIFY_RETURN] = "modify_return", 108 [BPF_LSM_MAC] = "lsm_mac", 109 [BPF_LSM_CGROUP] = "lsm_cgroup", 110 [BPF_SK_LOOKUP] = "sk_lookup", 111 [BPF_TRACE_ITER] = "trace_iter", 112 [BPF_XDP_DEVMAP] = "xdp_devmap", 113 [BPF_XDP_CPUMAP] = "xdp_cpumap", 114 [BPF_XDP] = "xdp", 115 [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select", 116 [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate", 117 [BPF_PERF_EVENT] = "perf_event", 118 [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", 119 [BPF_STRUCT_OPS] = "struct_ops", 120 [BPF_NETFILTER] = "netfilter", 121 [BPF_TCX_INGRESS] = "tcx_ingress", 122 [BPF_TCX_EGRESS] = "tcx_egress", 123 [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", 124 }; 125 126 static const char * const link_type_name[] = { 127 [BPF_LINK_TYPE_UNSPEC] = "unspec", 128 [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", 129 [BPF_LINK_TYPE_TRACING] = "tracing", 130 [BPF_LINK_TYPE_CGROUP] = "cgroup", 131 [BPF_LINK_TYPE_ITER] = "iter", 132 [BPF_LINK_TYPE_NETNS] = "netns", 133 [BPF_LINK_TYPE_XDP] = "xdp", 134 [BPF_LINK_TYPE_PERF_EVENT] = "perf_event", 135 [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", 136 [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", 137 [BPF_LINK_TYPE_NETFILTER] = "netfilter", 138 [BPF_LINK_TYPE_TCX] = "tcx", 139 [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", 140 }; 141 142 static const char * const map_type_name[] = { 143 [BPF_MAP_TYPE_UNSPEC] = "unspec", 144 [BPF_MAP_TYPE_HASH] = "hash", 145 [BPF_MAP_TYPE_ARRAY] = "array", 146 [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array", 147 [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array", 148 [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash", 149 [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array", 150 [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace", 151 [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array", 152 [BPF_MAP_TYPE_LRU_HASH] = "lru_hash", 153 [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash", 154 [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie", 155 [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps", 156 [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps", 157 [BPF_MAP_TYPE_DEVMAP] = "devmap", 158 [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash", 159 [BPF_MAP_TYPE_SOCKMAP] = "sockmap", 160 [BPF_MAP_TYPE_CPUMAP] = "cpumap", 161 [BPF_MAP_TYPE_XSKMAP] = "xskmap", 162 [BPF_MAP_TYPE_SOCKHASH] = "sockhash", 163 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", 164 [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray", 165 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", 166 [BPF_MAP_TYPE_QUEUE] = "queue", 167 [BPF_MAP_TYPE_STACK] = "stack", 168 [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", 169 [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", 170 [BPF_MAP_TYPE_RINGBUF] = "ringbuf", 171 [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", 172 [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage", 173 [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", 174 [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf", 175 [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage", 176 }; 177 178 static const char * const prog_type_name[] = { 179 [BPF_PROG_TYPE_UNSPEC] = "unspec", 180 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", 181 [BPF_PROG_TYPE_KPROBE] = "kprobe", 182 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", 183 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", 184 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", 185 [BPF_PROG_TYPE_XDP] = "xdp", 186 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", 187 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", 
	[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
	[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
	[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
	[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
	[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
	[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
	[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
	[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
	[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
	[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
	[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
	[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
	[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
	[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
	[BPF_PROG_TYPE_TRACING] = "tracing",
	[BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_PROG_TYPE_EXT] = "ext",
	[BPF_PROG_TYPE_LSM] = "lsm",
	[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
	[BPF_PROG_TYPE_SYSCALL] = "syscall",
	[BPF_PROG_TYPE_NETFILTER] = "netfilter",
};

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn;

	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);

	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;
	int old_errno;
	libbpf_print_fn_t print_fn;

	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
	if (!print_fn)
		return;

	old_errno = errno;

	va_start(args, format);
	/* use the atomically loaded copy, not __libbpf_pr itself, to avoid
	 * racing with a concurrent libbpf_set_print()
	 */
	print_fn(level, format, args);
	va_end(args);

	errno = old_errno;
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_LD64,
	RELO_EXTERN_CALL,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			int sym_off;
			int ext_idx;
		};
	};
};

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support that */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and few other) BPF programs
	 * that were attachable through BPF_PROG_ATTACH command. Pretty
	 * meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* BPF program supports non-linear XDP buffer */
	SEC_XDP_FRAGS = 16,
	/* Set up proper attach type for USDT probes. */
	SEC_USDT = 32,
};

struct bpf_sec_def {
	char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;
	int handler_id;

	libbpf_prog_setup_fn_t prog_setup_fn;
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	libbpf_prog_attach_fn_t prog_attach_fn;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	char *name;
	char *sec_name;
	size_t sec_idx;
	const struct bpf_sec_def *sec_def;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each of the main
	 * BPF programs is processed and relocated, and is used to determine
	 * whether the sub-program was already appended to the main program,
	 * and if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	/* BPF verifier log settings */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct bpf_object *obj;

	int fd;
	bool autoload;
	bool autoattach;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;

	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;

	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops).
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

struct bpf_map {
	struct bpf_object *obj;
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool autocreate;
	__u64 map_extra;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	char *essent_name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};

enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
};

struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	Elf_Data *st_ops_data;
	Elf_Data *st_ops_link_data;
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;
	size_t sec_cnt;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	int st_ops_shndx;
	int st_ops_link_shndx;
};

struct usdt_manager;

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	struct usdt_manager *usdt_man;

	char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
	if (!prog)
		return;

	zclose(prog->fd);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->fd = -1;

	/* libbpf's convention for SEC("?abc...") is that it's just like
	 * SEC("abc...") but the corresponding bpf_program starts out with
	 * autoload set to false.
	 */
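	/* For example, a program declared SEC("?xdp") is parsed and relocated
	 * as usual, but is skipped at load time unless the user opts in with
	 * bpf_program__set_autoload(prog, true).
	 */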
	if (sec_name[0] == '?') {
		prog->autoload = false;
		/* from now on forget there was ? in section name */
		sec_name++;
	} else {
		prog->autoload = true;
	}

	prog->autoattach = true;

	/* inherit object's log_level */
	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs is still
			 * valid, so no special treatment is needed in
			 * bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
		    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
			|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

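			/* st_ops->progs[i] is filled in while collecting
			 * struct_ops relocations; a NULL slot means no BPF
			 * program was assigned to this callback member
			 */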
			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
				int shndx, Elf_Data *data, __u32 map_flags)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, sec_name,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			sec_name);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, sec_name);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;
		map->def.map_flags = map_flags;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, sec_name);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
	int err;

	err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
				   obj->efile.st_ops_data, 0);
	err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
					  obj->efile.st_ops_link_shndx,
					  obj->efile.st_ops_link_data,
					  BPF_F_LINK);
	return err;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call bpf_object__elf_finish()
	 * after data collection to return obj_buf to the user. If not, we
	 * would need to duplicate the buffer to avoid the user freeing it
	 * before ELF processing finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->efile.st_ops_link_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;
	obj->efile.st_ops_data = NULL;
	obj->efile.st_ops_link_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* obj_buf should have been validated by bpf_object__open_mem(). */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	if (!data) {
		pr_warn("invalid license section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data section buffer
	 */
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (!data || size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	Elf_Data *data;
	Elf_Scn *scn;

	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		*size = data->d_size;
		return 0; /* found it */
	}

	return -ENOENT;
}

static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return ERR_PTR(-EIO);
		}
		if (strcmp(name, sname) == 0)
			return sym;
	}

	return ERR_PTR(-ENOENT);
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

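	/* make sure obj->maps has room for one more element; on growth,
	 * libbpf_ensure_mem() reallocates the array and zero-initializes
	 * the newly added tail
	 */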
	err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
				sizeof(*obj->maps), obj->nr_maps + 1);
	if (err)
		return ERR_PTR(err);

	map = &obj->maps[obj->nr_maps++];
	map->obj = obj;
	map->fd = -1;
	map->inner_map_fd = -1;
	map->autocreate = true;

	return map;
}

static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(value_sz, 8) * max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
	void *mmaped;

	if (!map->mmaped)
		return -EINVAL;

	if (old_sz == new_sz)
		return 0;

	mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mmaped == MAP_FAILED)
		return -errno;

	memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
	munmap(map->mmaped, old_sz);
	map->mmaped = mmaped;
	return 0;
}

static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

	/* This is one of the more confusing parts of libbpf for various
	 * reasons, some of which are historical. The original idea for naming
	 * internal names was to include as much of BPF object name prefix as
	 * possible, so that it can be distinguished from similar internal
	 * maps of a different BPF object.
	 * As an example, let's say we have bpf_object named 'my_object_name'
	 * and internal map corresponding to '.rodata' ELF section. The final
	 * map name advertised to user and to the kernel will be
	 * 'my_objec.rodata', taking first 8 characters of object name and
	 * entire 7 characters of '.rodata'.
	 * Somewhat confusingly, if internal map ELF section name is shorter
	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
	 * for the suffix, even though we only have 4 actual characters, and
	 * resulting map will be called 'my_objec.bss', not even using all 15
	 * characters allowed by the kernel. Oh well, at least the truncated
	 * object name is somewhat consistent in this case. But if the map
	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
	 * (8 chars) and thus will be left with only first 7 characters of the
	 * object name ('my_obje'). Happy guessing, user, that the final map
	 * name will be "my_obje.kconfig".
	 * Now, with libbpf starting to support arbitrarily named .rodata.*
	 * and .data.* data sections, it's possible that ELF section name is
	 * longer than allowed 15 chars, so we now need to be careful to take
	 * only up to 15 first characters of ELF name, taking no BPF object
	 * name characters at all. So '.rodata.abracadabra' will result in
	 * '.rodata.abracad' kernel and user-visible name.
	 * We need to keep this convoluted logic intact for .data, .bss and
	 * .rodata maps, but for new custom .data.custom and .rodata.custom
	 * maps we use their ELF names as is, not prepending bpf_object name
	 * in front. We still need to truncate them to 15 characters for the
	 * kernel. Full name can be recovered for such maps by using DATASEC
	 * BTF type associated with such map's value type, though.
	 */
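	/* In short: single-dot section names (".data", ".bss", ".rodata",
	 * ".kconfig") become "<object name prefix><section name>", while
	 * names with a second dot (".data.foo", ".rodata.bar") use the ELF
	 * section name alone, truncated to the kernel's 15-character limit.
	 */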
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

	/* if there are two or more dots in map name, it's a custom dot map */
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}

static int
map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);

/* Internal BPF map is mmap()'able only if at least one of corresponding
 * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
 * variable and it's not marked as __hidden (which turns it into, effectively,
 * a STATIC variable).
 */
static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
{
	const struct btf_type *t, *vt;
	struct btf_var_secinfo *vsi;
	int i, n;

	if (!map->btf_value_type_id)
		return false;

	t = btf__type_by_id(obj->btf, map->btf_value_type_id);
	if (!btf_is_datasec(t))
		return false;

	vsi = btf_var_secinfos(t);
	for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
		vt = btf__type_by_id(obj->btf, vsi->type);
		if (!btf_is_var(vt))
			continue;

		if (btf_var(vt)->linkage != BTF_VAR_STATIC)
			return true;
	}

	return false;
}

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      const char *real_name, int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	size_t mmap_sz;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->real_name = strdup(real_name);
	map->name = internal_map_name(obj, real_name);
	if (!map->real_name || !map->name) {
		zfree(&map->real_name);
		zfree(&map->name);
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;

	/* failures are fine because of maps like .rodata.str1.1 */
	(void) map_fill_btf_type_info(obj, map);

	if (map_is_mmapable(obj, map))
		def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
	map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->real_name);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	struct elf_sec_desc *sec_desc;
	const char *sec_name;
	int err = 0, sec_idx;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
		sec_desc = &obj->efile.secs[sec_idx];

		/* Skip recognized sections with size 0. */
		if (!sec_desc->data || sec_desc->data->d_size == 0)
			continue;

		switch (sec_desc->sec_type) {
		case SEC_DATA:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_RODATA:
			obj->has_rodata = true;
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_BSS:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
							    sec_name, sec_idx,
							    NULL,
							    sec_desc->data->d_size);
			break;
		default:
			/* skip */
			break;
		}
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y';
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
			ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	/* Validate that value stored in u64 fits in integer of `ext->sz`
	 * bytes size without any loss of information. If the target integer
	 * is signed, we rely on the following limits of integer type of
	 * Y bits and subsequent transformation:
	 *
	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
	 *      0 <= X + 2^(Y-1)       <= 2^Y - 1
	 *      0 <= X + 2^(Y-1)       <  2^Y
	 *
	 * For unsigned target integer, check that all the (64 - Y) bits are
	 * zero.
	 */
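	/* E.g., for a 2-byte signed target (Y = 16), X = -1 is stored as
	 * v = 0xffffffffffffffff; v + 2^15 wraps around to 0x7fff < 2^16, so
	 * it fits, while X = 70000 gives v + 2^15 = 102768 >= 2^16 and is
	 * rejected.
	 */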
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}

static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
	    ext->kcfg.type != KCFG_BOOL) {
		pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (ext->kcfg.type == KCFG_BOOL && value > 1) {
		pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1:
		*(__u8 *)ext_val = value;
		break;
	case 2:
		*(__u16 *)ext_val = value;
		break;
	case 4:
		*(__u32 *)ext_val = value;
		break;
	case 8:
		*(__u64 *)ext_val = value;
		break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (!str_has_pfx(buf, "CONFIG_"))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
			return err;
		}
		if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
			pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
			return -EINVAL;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
	return 0;
}

static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

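	/* Look for /boot/config-<release> first and fall back to
	 * /proc/config.gz (available when CONFIG_IKCONFIG_PROC is set).
	 */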
*/ 1986 file = gzopen(buf, "re"); 1987 if (!file) 1988 file = gzopen("/proc/config.gz", "re"); 1989 1990 if (!file) { 1991 pr_warn("failed to open system Kconfig\n"); 1992 return -ENOENT; 1993 } 1994 1995 while (gzgets(file, buf, sizeof(buf))) { 1996 err = bpf_object__process_kconfig_line(obj, buf, data); 1997 if (err) { 1998 pr_warn("error parsing system Kconfig line '%s': %d\n", 1999 buf, err); 2000 goto out; 2001 } 2002 } 2003 2004 out: 2005 gzclose(file); 2006 return err; 2007 } 2008 2009 static int bpf_object__read_kconfig_mem(struct bpf_object *obj, 2010 const char *config, void *data) 2011 { 2012 char buf[PATH_MAX]; 2013 int err = 0; 2014 FILE *file; 2015 2016 file = fmemopen((void *)config, strlen(config), "r"); 2017 if (!file) { 2018 err = -errno; 2019 pr_warn("failed to open in-memory Kconfig: %d\n", err); 2020 return err; 2021 } 2022 2023 while (fgets(buf, sizeof(buf), file)) { 2024 err = bpf_object__process_kconfig_line(obj, buf, data); 2025 if (err) { 2026 pr_warn("error parsing in-memory Kconfig line '%s': %d\n", 2027 buf, err); 2028 break; 2029 } 2030 } 2031 2032 fclose(file); 2033 return err; 2034 } 2035 2036 static int bpf_object__init_kconfig_map(struct bpf_object *obj) 2037 { 2038 struct extern_desc *last_ext = NULL, *ext; 2039 size_t map_sz; 2040 int i, err; 2041 2042 for (i = 0; i < obj->nr_extern; i++) { 2043 ext = &obj->externs[i]; 2044 if (ext->type == EXT_KCFG) 2045 last_ext = ext; 2046 } 2047 2048 if (!last_ext) 2049 return 0; 2050 2051 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; 2052 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, 2053 ".kconfig", obj->efile.symbols_shndx, 2054 NULL, map_sz); 2055 if (err) 2056 return err; 2057 2058 obj->kconfig_map_idx = obj->nr_maps - 1; 2059 2060 return 0; 2061 } 2062 2063 const struct btf_type * 2064 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) 2065 { 2066 const struct btf_type *t = btf__type_by_id(btf, id); 2067 2068 if (res_id) 2069 *res_id = id; 2070 2071 while (btf_is_mod(t) || btf_is_typedef(t)) { 2072 if (res_id) 2073 *res_id = t->type; 2074 t = btf__type_by_id(btf, t->type); 2075 } 2076 2077 return t; 2078 } 2079 2080 static const struct btf_type * 2081 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) 2082 { 2083 const struct btf_type *t; 2084 2085 t = skip_mods_and_typedefs(btf, id, NULL); 2086 if (!btf_is_ptr(t)) 2087 return NULL; 2088 2089 t = skip_mods_and_typedefs(btf, t->type, res_id); 2090 2091 return btf_is_func_proto(t) ? 
t : NULL; 2092 } 2093 2094 static const char *__btf_kind_str(__u16 kind) 2095 { 2096 switch (kind) { 2097 case BTF_KIND_UNKN: return "void"; 2098 case BTF_KIND_INT: return "int"; 2099 case BTF_KIND_PTR: return "ptr"; 2100 case BTF_KIND_ARRAY: return "array"; 2101 case BTF_KIND_STRUCT: return "struct"; 2102 case BTF_KIND_UNION: return "union"; 2103 case BTF_KIND_ENUM: return "enum"; 2104 case BTF_KIND_FWD: return "fwd"; 2105 case BTF_KIND_TYPEDEF: return "typedef"; 2106 case BTF_KIND_VOLATILE: return "volatile"; 2107 case BTF_KIND_CONST: return "const"; 2108 case BTF_KIND_RESTRICT: return "restrict"; 2109 case BTF_KIND_FUNC: return "func"; 2110 case BTF_KIND_FUNC_PROTO: return "func_proto"; 2111 case BTF_KIND_VAR: return "var"; 2112 case BTF_KIND_DATASEC: return "datasec"; 2113 case BTF_KIND_FLOAT: return "float"; 2114 case BTF_KIND_DECL_TAG: return "decl_tag"; 2115 case BTF_KIND_TYPE_TAG: return "type_tag"; 2116 case BTF_KIND_ENUM64: return "enum64"; 2117 default: return "unknown"; 2118 } 2119 } 2120 2121 const char *btf_kind_str(const struct btf_type *t) 2122 { 2123 return __btf_kind_str(btf_kind(t)); 2124 } 2125 2126 /* 2127 * Fetch integer attribute of BTF map definition. Such attributes are 2128 * represented using a pointer to an array, in which dimensionality of array 2129 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; 2130 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF 2131 * type definition, while using only sizeof(void *) space in ELF data section. 2132 */ 2133 static bool get_map_field_int(const char *map_name, const struct btf *btf, 2134 const struct btf_member *m, __u32 *res) 2135 { 2136 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); 2137 const char *name = btf__name_by_offset(btf, m->name_off); 2138 const struct btf_array *arr_info; 2139 const struct btf_type *arr_t; 2140 2141 if (!btf_is_ptr(t)) { 2142 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", 2143 map_name, name, btf_kind_str(t)); 2144 return false; 2145 } 2146 2147 arr_t = btf__type_by_id(btf, t->type); 2148 if (!arr_t) { 2149 pr_warn("map '%s': attr '%s': type [%u] not found.\n", 2150 map_name, name, t->type); 2151 return false; 2152 } 2153 if (!btf_is_array(arr_t)) { 2154 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", 2155 map_name, name, btf_kind_str(arr_t)); 2156 return false; 2157 } 2158 arr_info = btf_array(arr_t); 2159 *res = arr_info->nelems; 2160 return true; 2161 } 2162 2163 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name) 2164 { 2165 int len; 2166 2167 len = snprintf(buf, buf_sz, "%s/%s", path, name); 2168 if (len < 0) 2169 return -EINVAL; 2170 if (len >= buf_sz) 2171 return -ENAMETOOLONG; 2172 2173 return 0; 2174 } 2175 2176 static int build_map_pin_path(struct bpf_map *map, const char *path) 2177 { 2178 char buf[PATH_MAX]; 2179 int err; 2180 2181 if (!path) 2182 path = "/sys/fs/bpf"; 2183 2184 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); 2185 if (err) 2186 return err; 2187 2188 return bpf_map__set_pin_path(map, buf); 2189 } 2190 2191 /* should match definition in bpf_helpers.h */ 2192 enum libbpf_pin_type { 2193 LIBBPF_PIN_NONE, 2194 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ 2195 LIBBPF_PIN_BY_NAME, 2196 }; 2197 2198 int parse_btf_map_def(const char *map_name, struct btf *btf, 2199 const struct btf_type *def_t, bool strict, 2200 struct btf_map_def *map_def, struct btf_map_def *inner_def) 2201 { 2202 const struct btf_type *t; 
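/* Example (illustrative, not part of libbpf itself): a BTF-defined map
 * that this function parses, written with the __uint()/__type() macros
 * from bpf_helpers.h; each attribute is encoded as a pointer-to-array
 * type, which get_map_field_int() above decodes ("my_map" is a made-up
 * name):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_map SEC(".maps");
 *
 * The key/value members set both the *_SIZE and *_TYPE parts handled
 * below.
 */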
2203 const struct btf_member *m; 2204 bool is_inner = inner_def == NULL; 2205 int vlen, i; 2206 2207 vlen = btf_vlen(def_t); 2208 m = btf_members(def_t); 2209 for (i = 0; i < vlen; i++, m++) { 2210 const char *name = btf__name_by_offset(btf, m->name_off); 2211 2212 if (!name) { 2213 pr_warn("map '%s': invalid field #%d.\n", map_name, i); 2214 return -EINVAL; 2215 } 2216 if (strcmp(name, "type") == 0) { 2217 if (!get_map_field_int(map_name, btf, m, &map_def->map_type)) 2218 return -EINVAL; 2219 map_def->parts |= MAP_DEF_MAP_TYPE; 2220 } else if (strcmp(name, "max_entries") == 0) { 2221 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries)) 2222 return -EINVAL; 2223 map_def->parts |= MAP_DEF_MAX_ENTRIES; 2224 } else if (strcmp(name, "map_flags") == 0) { 2225 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags)) 2226 return -EINVAL; 2227 map_def->parts |= MAP_DEF_MAP_FLAGS; 2228 } else if (strcmp(name, "numa_node") == 0) { 2229 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node)) 2230 return -EINVAL; 2231 map_def->parts |= MAP_DEF_NUMA_NODE; 2232 } else if (strcmp(name, "key_size") == 0) { 2233 __u32 sz; 2234 2235 if (!get_map_field_int(map_name, btf, m, &sz)) 2236 return -EINVAL; 2237 if (map_def->key_size && map_def->key_size != sz) { 2238 pr_warn("map '%s': conflicting key size %u != %u.\n", 2239 map_name, map_def->key_size, sz); 2240 return -EINVAL; 2241 } 2242 map_def->key_size = sz; 2243 map_def->parts |= MAP_DEF_KEY_SIZE; 2244 } else if (strcmp(name, "key") == 0) { 2245 __s64 sz; 2246 2247 t = btf__type_by_id(btf, m->type); 2248 if (!t) { 2249 pr_warn("map '%s': key type [%d] not found.\n", 2250 map_name, m->type); 2251 return -EINVAL; 2252 } 2253 if (!btf_is_ptr(t)) { 2254 pr_warn("map '%s': key spec is not PTR: %s.\n", 2255 map_name, btf_kind_str(t)); 2256 return -EINVAL; 2257 } 2258 sz = btf__resolve_size(btf, t->type); 2259 if (sz < 0) { 2260 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", 2261 map_name, t->type, (ssize_t)sz); 2262 return sz; 2263 } 2264 if (map_def->key_size && map_def->key_size != sz) { 2265 pr_warn("map '%s': conflicting key size %u != %zd.\n", 2266 map_name, map_def->key_size, (ssize_t)sz); 2267 return -EINVAL; 2268 } 2269 map_def->key_size = sz; 2270 map_def->key_type_id = t->type; 2271 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE; 2272 } else if (strcmp(name, "value_size") == 0) { 2273 __u32 sz; 2274 2275 if (!get_map_field_int(map_name, btf, m, &sz)) 2276 return -EINVAL; 2277 if (map_def->value_size && map_def->value_size != sz) { 2278 pr_warn("map '%s': conflicting value size %u != %u.\n", 2279 map_name, map_def->value_size, sz); 2280 return -EINVAL; 2281 } 2282 map_def->value_size = sz; 2283 map_def->parts |= MAP_DEF_VALUE_SIZE; 2284 } else if (strcmp(name, "value") == 0) { 2285 __s64 sz; 2286 2287 t = btf__type_by_id(btf, m->type); 2288 if (!t) { 2289 pr_warn("map '%s': value type [%d] not found.\n", 2290 map_name, m->type); 2291 return -EINVAL; 2292 } 2293 if (!btf_is_ptr(t)) { 2294 pr_warn("map '%s': value spec is not PTR: %s.\n", 2295 map_name, btf_kind_str(t)); 2296 return -EINVAL; 2297 } 2298 sz = btf__resolve_size(btf, t->type); 2299 if (sz < 0) { 2300 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", 2301 map_name, t->type, (ssize_t)sz); 2302 return sz; 2303 } 2304 if (map_def->value_size && map_def->value_size != sz) { 2305 pr_warn("map '%s': conflicting value size %u != %zd.\n", 2306 map_name, map_def->value_size, (ssize_t)sz); 2307 return -EINVAL; 2308 } 2309 
map_def->value_size = sz; 2310 map_def->value_type_id = t->type; 2311 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; 2312 } 2313 else if (strcmp(name, "values") == 0) { 2314 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); 2315 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; 2316 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value"; 2317 char inner_map_name[128]; 2318 int err; 2319 2320 if (is_inner) { 2321 pr_warn("map '%s': multi-level inner maps not supported.\n", 2322 map_name); 2323 return -ENOTSUP; 2324 } 2325 if (i != vlen - 1) { 2326 pr_warn("map '%s': '%s' member should be last.\n", 2327 map_name, name); 2328 return -EINVAL; 2329 } 2330 if (!is_map_in_map && !is_prog_array) { 2331 pr_warn("map '%s': should be map-in-map or prog-array.\n", 2332 map_name); 2333 return -ENOTSUP; 2334 } 2335 if (map_def->value_size && map_def->value_size != 4) { 2336 pr_warn("map '%s': conflicting value size %u != 4.\n", 2337 map_name, map_def->value_size); 2338 return -EINVAL; 2339 } 2340 map_def->value_size = 4; 2341 t = btf__type_by_id(btf, m->type); 2342 if (!t) { 2343 pr_warn("map '%s': %s type [%d] not found.\n", 2344 map_name, desc, m->type); 2345 return -EINVAL; 2346 } 2347 if (!btf_is_array(t) || btf_array(t)->nelems) { 2348 pr_warn("map '%s': %s spec is not a zero-sized array.\n", 2349 map_name, desc); 2350 return -EINVAL; 2351 } 2352 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); 2353 if (!btf_is_ptr(t)) { 2354 pr_warn("map '%s': %s def is of unexpected kind %s.\n", 2355 map_name, desc, btf_kind_str(t)); 2356 return -EINVAL; 2357 } 2358 t = skip_mods_and_typedefs(btf, t->type, NULL); 2359 if (is_prog_array) { 2360 if (!btf_is_func_proto(t)) { 2361 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", 2362 map_name, btf_kind_str(t)); 2363 return -EINVAL; 2364 } 2365 continue; 2366 } 2367 if (!btf_is_struct(t)) { 2368 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", 2369 map_name, btf_kind_str(t)); 2370 return -EINVAL; 2371 } 2372 2373 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name); 2374 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL); 2375 if (err) 2376 return err; 2377 2378 map_def->parts |= MAP_DEF_INNER_MAP; 2379 } else if (strcmp(name, "pinning") == 0) { 2380 __u32 val; 2381 2382 if (is_inner) { 2383 pr_warn("map '%s': inner def can't be pinned.\n", map_name); 2384 return -EINVAL; 2385 } 2386 if (!get_map_field_int(map_name, btf, m, &val)) 2387 return -EINVAL; 2388 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) { 2389 pr_warn("map '%s': invalid pinning value %u.\n", 2390 map_name, val); 2391 return -EINVAL; 2392 } 2393 map_def->pinning = val; 2394 map_def->parts |= MAP_DEF_PINNING; 2395 } else if (strcmp(name, "map_extra") == 0) { 2396 __u32 map_extra; 2397 2398 if (!get_map_field_int(map_name, btf, m, &map_extra)) 2399 return -EINVAL; 2400 map_def->map_extra = map_extra; 2401 map_def->parts |= MAP_DEF_MAP_EXTRA; 2402 } else { 2403 if (strict) { 2404 pr_warn("map '%s': unknown field '%s'.\n", map_name, name); 2405 return -ENOTSUP; 2406 } 2407 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name); 2408 } 2409 } 2410 2411 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) { 2412 pr_warn("map '%s': map type isn't specified.\n", map_name); 2413 return -EINVAL; 2414 } 2415 2416 return 0; 2417 } 2418 2419 static size_t adjust_ringbuf_sz(size_t sz) 2420 { 2421 __u32 page_sz = sysconf(_SC_PAGE_SIZE); 2422 
__u32 mul; 2423 2424 /* if user forgot to set any size, make sure they see an error */ 2425 if (sz == 0) 2426 return 0; 2427 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be 2428 * a power-of-2 multiple of kernel's page size. If user diligently 2429 * satisfied these conditions, pass the size through. 2430 */ 2431 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz)) 2432 return sz; 2433 2434 /* Otherwise find closest (page_sz * power_of_2) product bigger than 2435 * user-set size to satisfy both user size request and kernel 2436 * requirements and substitute correct max_entries for map creation; e.g., with 4096-byte pages, a requested size of 5000 is rounded up to 8192. 2437 */ 2438 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) { 2439 if (mul * page_sz > sz) 2440 return mul * page_sz; 2441 } 2442 2443 /* if it's impossible to satisfy the conditions (i.e., user size is 2444 * very close to UINT_MAX but is not a power-of-2 multiple of 2445 * page_size) then just return original size and let kernel reject it 2446 */ 2447 return sz; 2448 } 2449 2450 static bool map_is_ringbuf(const struct bpf_map *map) 2451 { 2452 return map->def.type == BPF_MAP_TYPE_RINGBUF || 2453 map->def.type == BPF_MAP_TYPE_USER_RINGBUF; 2454 } 2455 2456 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def) 2457 { 2458 map->def.type = def->map_type; 2459 map->def.key_size = def->key_size; 2460 map->def.value_size = def->value_size; 2461 map->def.max_entries = def->max_entries; 2462 map->def.map_flags = def->map_flags; 2463 map->map_extra = def->map_extra; 2464 2465 map->numa_node = def->numa_node; 2466 map->btf_key_type_id = def->key_type_id; 2467 map->btf_value_type_id = def->value_type_id; 2468 2469 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ 2470 if (map_is_ringbuf(map)) 2471 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); 2472 2473 if (def->parts & MAP_DEF_MAP_TYPE) 2474 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type); 2475 2476 if (def->parts & MAP_DEF_KEY_TYPE) 2477 pr_debug("map '%s': found key [%u], sz = %u.\n", 2478 map->name, def->key_type_id, def->key_size); 2479 else if (def->parts & MAP_DEF_KEY_SIZE) 2480 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size); 2481 2482 if (def->parts & MAP_DEF_VALUE_TYPE) 2483 pr_debug("map '%s': found value [%u], sz = %u.\n", 2484 map->name, def->value_type_id, def->value_size); 2485 else if (def->parts & MAP_DEF_VALUE_SIZE) 2486 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size); 2487 2488 if (def->parts & MAP_DEF_MAX_ENTRIES) 2489 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); 2490 if (def->parts & MAP_DEF_MAP_FLAGS) 2491 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); 2492 if (def->parts & MAP_DEF_MAP_EXTRA) 2493 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, 2494 (unsigned long long)def->map_extra); 2495 if (def->parts & MAP_DEF_PINNING) 2496 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); 2497 if (def->parts & MAP_DEF_NUMA_NODE) 2498 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node); 2499 2500 if (def->parts & MAP_DEF_INNER_MAP) 2501 pr_debug("map '%s': found inner map definition.\n", map->name); 2502 } 2503 2504 static const char *btf_var_linkage_str(__u32 linkage) 2505 { 2506 switch (linkage) { 2507 case BTF_VAR_STATIC: return "static"; 2508 case BTF_VAR_GLOBAL_ALLOCATED: return "global"; 2509 case BTF_VAR_GLOBAL_EXTERN: return "extern"; 2510 default: return "unknown"; 2511 }
2512 } 2513 2514 static int bpf_object__init_user_btf_map(struct bpf_object *obj, 2515 const struct btf_type *sec, 2516 int var_idx, int sec_idx, 2517 const Elf_Data *data, bool strict, 2518 const char *pin_root_path) 2519 { 2520 struct btf_map_def map_def = {}, inner_def = {}; 2521 const struct btf_type *var, *def; 2522 const struct btf_var_secinfo *vi; 2523 const struct btf_var *var_extra; 2524 const char *map_name; 2525 struct bpf_map *map; 2526 int err; 2527 2528 vi = btf_var_secinfos(sec) + var_idx; 2529 var = btf__type_by_id(obj->btf, vi->type); 2530 var_extra = btf_var(var); 2531 map_name = btf__name_by_offset(obj->btf, var->name_off); 2532 2533 if (map_name == NULL || map_name[0] == '\0') { 2534 pr_warn("map #%d: empty name.\n", var_idx); 2535 return -EINVAL; 2536 } 2537 if ((__u64)vi->offset + vi->size > data->d_size) { 2538 pr_warn("map '%s' BTF data is corrupted.\n", map_name); 2539 return -EINVAL; 2540 } 2541 if (!btf_is_var(var)) { 2542 pr_warn("map '%s': unexpected var kind %s.\n", 2543 map_name, btf_kind_str(var)); 2544 return -EINVAL; 2545 } 2546 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { 2547 pr_warn("map '%s': unsupported map linkage %s.\n", 2548 map_name, btf_var_linkage_str(var_extra->linkage)); 2549 return -EOPNOTSUPP; 2550 } 2551 2552 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); 2553 if (!btf_is_struct(def)) { 2554 pr_warn("map '%s': unexpected def kind %s.\n", 2555 map_name, btf_kind_str(def)); 2556 return -EINVAL; 2557 } 2558 if (def->size > vi->size) { 2559 pr_warn("map '%s': invalid def size.\n", map_name); 2560 return -EINVAL; 2561 } 2562 2563 map = bpf_object__add_map(obj); 2564 if (IS_ERR(map)) 2565 return PTR_ERR(map); 2566 map->name = strdup(map_name); 2567 if (!map->name) { 2568 pr_warn("map '%s': failed to alloc map name.\n", map_name); 2569 return -ENOMEM; 2570 } 2571 map->libbpf_type = LIBBPF_MAP_UNSPEC; 2572 map->def.type = BPF_MAP_TYPE_UNSPEC; 2573 map->sec_idx = sec_idx; 2574 map->sec_offset = vi->offset; 2575 map->btf_var_idx = var_idx; 2576 pr_debug("map '%s': at sec_idx %d, offset %zu.\n", 2577 map_name, map->sec_idx, map->sec_offset); 2578 2579 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); 2580 if (err) 2581 return err; 2582 2583 fill_map_from_def(map, &map_def); 2584 2585 if (map_def.pinning == LIBBPF_PIN_BY_NAME) { 2586 err = build_map_pin_path(map, pin_root_path); 2587 if (err) { 2588 pr_warn("map '%s': couldn't build pin path.\n", map->name); 2589 return err; 2590 } 2591 } 2592 2593 if (map_def.parts & MAP_DEF_INNER_MAP) { 2594 map->inner_map = calloc(1, sizeof(*map->inner_map)); 2595 if (!map->inner_map) 2596 return -ENOMEM; 2597 map->inner_map->fd = -1; 2598 map->inner_map->sec_idx = sec_idx; 2599 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); 2600 if (!map->inner_map->name) 2601 return -ENOMEM; 2602 sprintf(map->inner_map->name, "%s.inner", map_name); 2603 2604 fill_map_from_def(map->inner_map, &inner_def); 2605 } 2606 2607 err = map_fill_btf_type_info(obj, map); 2608 if (err) 2609 return err; 2610 2611 return 0; 2612 } 2613 2614 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, 2615 const char *pin_root_path) 2616 { 2617 const struct btf_type *sec = NULL; 2618 int nr_types, i, vlen, err; 2619 const struct btf_type *t; 2620 const char *name; 2621 Elf_Data *data; 2622 Elf_Scn *scn; 2623 2624 if (obj->efile.btf_maps_shndx < 0) 2625 return 0; 2626 2627 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); 2628 data = elf_sec_data(obj,
scn); 2629 if (!scn || !data) { 2630 pr_warn("elf: failed to get %s map definitions for %s\n", 2631 MAPS_ELF_SEC, obj->path); 2632 return -EINVAL; 2633 } 2634 2635 nr_types = btf__type_cnt(obj->btf); 2636 for (i = 1; i < nr_types; i++) { 2637 t = btf__type_by_id(obj->btf, i); 2638 if (!btf_is_datasec(t)) 2639 continue; 2640 name = btf__name_by_offset(obj->btf, t->name_off); 2641 if (strcmp(name, MAPS_ELF_SEC) == 0) { 2642 sec = t; 2643 obj->efile.btf_maps_sec_btf_id = i; 2644 break; 2645 } 2646 } 2647 2648 if (!sec) { 2649 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); 2650 return -ENOENT; 2651 } 2652 2653 vlen = btf_vlen(sec); 2654 for (i = 0; i < vlen; i++) { 2655 err = bpf_object__init_user_btf_map(obj, sec, i, 2656 obj->efile.btf_maps_shndx, 2657 data, strict, 2658 pin_root_path); 2659 if (err) 2660 return err; 2661 } 2662 2663 return 0; 2664 } 2665 2666 static int bpf_object__init_maps(struct bpf_object *obj, 2667 const struct bpf_object_open_opts *opts) 2668 { 2669 const char *pin_root_path; 2670 bool strict; 2671 int err = 0; 2672 2673 strict = !OPTS_GET(opts, relaxed_maps, false); 2674 pin_root_path = OPTS_GET(opts, pin_root_path, NULL); 2675 2676 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); 2677 err = err ?: bpf_object__init_global_data_maps(obj); 2678 err = err ?: bpf_object__init_kconfig_map(obj); 2679 err = err ?: bpf_object_init_struct_ops(obj); 2680 2681 return err; 2682 } 2683 2684 static bool section_have_execinstr(struct bpf_object *obj, int idx) 2685 { 2686 Elf64_Shdr *sh; 2687 2688 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); 2689 if (!sh) 2690 return false; 2691 2692 return sh->sh_flags & SHF_EXECINSTR; 2693 } 2694 2695 static bool btf_needs_sanitization(struct bpf_object *obj) 2696 { 2697 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); 2698 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); 2699 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); 2700 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); 2701 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); 2702 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); 2703 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); 2704 2705 return !has_func || !has_datasec || !has_func_global || !has_float || 2706 !has_decl_tag || !has_type_tag || !has_enum64; 2707 } 2708 2709 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) 2710 { 2711 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); 2712 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); 2713 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); 2714 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); 2715 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); 2716 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); 2717 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64); 2718 int enum64_placeholder_id = 0; 2719 struct btf_type *t; 2720 int i, j, vlen; 2721 2722 for (i = 1; i < btf__type_cnt(btf); i++) { 2723 t = (struct btf_type *)btf__type_by_id(btf, i); 2724 2725 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { 2726 /* replace VAR/DECL_TAG with INT */ 2727 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); 2728 /* 2729 * using size = 1 is the safest choice, 4 will be too 2730 * big and cause kernel BTF validation failure if 2731 * original variable took less than 4 bytes 2732 */ 2733 t->size = 1; 2734 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); 2735 } else if (!has_datasec && btf_is_datasec(t)) { 
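/* Illustrative effect of this branch: on a kernel without DATASEC
 * support, a DATASEC ".data" holding VARs `a` at byte offset 0 and `b`
 * at byte offset 4 (made-up names) is rewritten in place into a STRUCT
 * named "_data" whose members `a` and `b` sit at bit offsets 0 and 32.
 */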
2736 /* replace DATASEC with STRUCT */ 2737 const struct btf_var_secinfo *v = btf_var_secinfos(t); 2738 struct btf_member *m = btf_members(t); 2739 struct btf_type *vt; 2740 char *name; 2741 2742 name = (char *)btf__name_by_offset(btf, t->name_off); 2743 while (*name) { 2744 if (*name == '.') 2745 *name = '_'; 2746 name++; 2747 } 2748 2749 vlen = btf_vlen(t); 2750 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); 2751 for (j = 0; j < vlen; j++, v++, m++) { 2752 /* order of field assignments is important */ 2753 m->offset = v->offset * 8; 2754 m->type = v->type; 2755 /* preserve variable name as member name */ 2756 vt = (void *)btf__type_by_id(btf, v->type); 2757 m->name_off = vt->name_off; 2758 } 2759 } else if (!has_func && btf_is_func_proto(t)) { 2760 /* replace FUNC_PROTO with ENUM */ 2761 vlen = btf_vlen(t); 2762 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); 2763 t->size = sizeof(__u32); /* kernel enforced */ 2764 } else if (!has_func && btf_is_func(t)) { 2765 /* replace FUNC with TYPEDEF */ 2766 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); 2767 } else if (!has_func_global && btf_is_func(t)) { 2768 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ 2769 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); 2770 } else if (!has_float && btf_is_float(t)) { 2771 /* replace FLOAT with an equally-sized empty STRUCT; 2772 * since C compilers do not accept e.g. "float" as a 2773 * valid struct name, make it anonymous 2774 */ 2775 t->name_off = 0; 2776 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); 2777 } else if (!has_type_tag && btf_is_type_tag(t)) { 2778 /* replace TYPE_TAG with a CONST */ 2779 t->name_off = 0; 2780 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); 2781 } else if (!has_enum64 && btf_is_enum(t)) { 2782 /* clear the kflag */ 2783 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false); 2784 } else if (!has_enum64 && btf_is_enum64(t)) { 2785 /* replace ENUM64 with a union */ 2786 struct btf_member *m; 2787 2788 if (enum64_placeholder_id == 0) { 2789 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0); 2790 if (enum64_placeholder_id < 0) 2791 return enum64_placeholder_id; 2792 2793 t = (struct btf_type *)btf__type_by_id(btf, i); 2794 } 2795 2796 m = btf_members(t); 2797 vlen = btf_vlen(t); 2798 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen); 2799 for (j = 0; j < vlen; j++, m++) { 2800 m->type = enum64_placeholder_id; 2801 m->offset = 0; 2802 } 2803 } 2804 } 2805 2806 return 0; 2807 } 2808 2809 static bool libbpf_needs_btf(const struct bpf_object *obj) 2810 { 2811 return obj->efile.btf_maps_shndx >= 0 || 2812 obj->efile.st_ops_shndx >= 0 || 2813 obj->efile.st_ops_link_shndx >= 0 || 2814 obj->nr_extern > 0; 2815 } 2816 2817 static bool kernel_needs_btf(const struct bpf_object *obj) 2818 { 2819 return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0; 2820 } 2821 2822 static int bpf_object__init_btf(struct bpf_object *obj, 2823 Elf_Data *btf_data, 2824 Elf_Data *btf_ext_data) 2825 { 2826 int err = -ENOENT; 2827 2828 if (btf_data) { 2829 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 2830 err = libbpf_get_error(obj->btf); 2831 if (err) { 2832 obj->btf = NULL; 2833 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err); 2834 goto out; 2835 } 2836 /* enforce 8-byte pointers for BPF-targeted BTFs */ 2837 btf__set_pointer_size(obj->btf, 8); 2838 } 2839 if (btf_ext_data) { 2840 struct btf_ext_info *ext_segs[3]; 2841 int seg_num, sec_num; 2842 2843 if (!obj->btf) { 2844 pr_debug("Ignore ELF section %s because its depending ELF section %s is 
not found.\n", 2845 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 2846 goto out; 2847 } 2848 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); 2849 err = libbpf_get_error(obj->btf_ext); 2850 if (err) { 2851 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n", 2852 BTF_EXT_ELF_SEC, err); 2853 obj->btf_ext = NULL; 2854 goto out; 2855 } 2856 2857 /* setup .BTF.ext to ELF section mapping */ 2858 ext_segs[0] = &obj->btf_ext->func_info; 2859 ext_segs[1] = &obj->btf_ext->line_info; 2860 ext_segs[2] = &obj->btf_ext->core_relo_info; 2861 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) { 2862 struct btf_ext_info *seg = ext_segs[seg_num]; 2863 const struct btf_ext_info_sec *sec; 2864 const char *sec_name; 2865 Elf_Scn *scn; 2866 2867 if (seg->sec_cnt == 0) 2868 continue; 2869 2870 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs)); 2871 if (!seg->sec_idxs) { 2872 err = -ENOMEM; 2873 goto out; 2874 } 2875 2876 sec_num = 0; 2877 for_each_btf_ext_sec(seg, sec) { 2878 /* preventively increment index to avoid doing 2879 * this before every continue below 2880 */ 2881 sec_num++; 2882 2883 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); 2884 if (str_is_empty(sec_name)) 2885 continue; 2886 scn = elf_sec_by_name(obj, sec_name); 2887 if (!scn) 2888 continue; 2889 2890 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn); 2891 } 2892 } 2893 } 2894 out: 2895 if (err && libbpf_needs_btf(obj)) { 2896 pr_warn("BTF is required, but is missing or corrupted.\n"); 2897 return err; 2898 } 2899 return 0; 2900 } 2901 2902 static int compare_vsi_off(const void *_a, const void *_b) 2903 { 2904 const struct btf_var_secinfo *a = _a; 2905 const struct btf_var_secinfo *b = _b; 2906 2907 return a->offset - b->offset; 2908 } 2909 2910 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, 2911 struct btf_type *t) 2912 { 2913 __u32 size = 0, i, vars = btf_vlen(t); 2914 const char *sec_name = btf__name_by_offset(btf, t->name_off); 2915 struct btf_var_secinfo *vsi; 2916 bool fixup_offsets = false; 2917 int err; 2918 2919 if (!sec_name) { 2920 pr_debug("No name found in string section for DATASEC kind.\n"); 2921 return -ENOENT; 2922 } 2923 2924 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and 2925 * variable offsets set at the previous step. Further, not every 2926 * extern BTF VAR has corresponding ELF symbol preserved, so we skip 2927 * all fixups altogether for such sections and go straight to sorting 2928 * VARs within their DATASEC. 2929 */ 2930 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0) 2931 goto sort_vars; 2932 2933 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to 2934 * fix this up. But BPF static linker already fixes this up and fills 2935 * all the sizes and offsets during static linking. So this step has 2936 * to be optional. But the STV_HIDDEN handling is non-optional for any 2937 * non-extern DATASEC, so the variable fixup loop below handles both 2938 * functions at the same time, paying the cost of BTF VAR <-> ELF 2939 * symbol matching just once. 
2940 */ 2941 if (t->size == 0) { 2942 err = find_elf_sec_sz(obj, sec_name, &size); 2943 if (err || !size) { 2944 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n", 2945 sec_name, size, err); 2946 return -ENOENT; 2947 } 2948 2949 t->size = size; 2950 fixup_offsets = true; 2951 } 2952 2953 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { 2954 const struct btf_type *t_var; 2955 struct btf_var *var; 2956 const char *var_name; 2957 Elf64_Sym *sym; 2958 2959 t_var = btf__type_by_id(btf, vsi->type); 2960 if (!t_var || !btf_is_var(t_var)) { 2961 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name); 2962 return -EINVAL; 2963 } 2964 2965 var = btf_var(t_var); 2966 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN) 2967 continue; 2968 2969 var_name = btf__name_by_offset(btf, t_var->name_off); 2970 if (!var_name) { 2971 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n", 2972 sec_name, i); 2973 return -ENOENT; 2974 } 2975 2976 sym = find_elf_var_sym(obj, var_name); 2977 if (IS_ERR(sym)) { 2978 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n", 2979 sec_name, var_name); 2980 return -ENOENT; 2981 } 2982 2983 if (fixup_offsets) 2984 vsi->offset = sym->st_value; 2985 2986 /* if variable is a global/weak symbol, but has restricted 2987 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR 2988 * as static. This follows similar logic for functions (BPF 2989 * subprogs) and influences libbpf's further decisions about 2990 * whether to make global data BPF array maps as 2991 * BPF_F_MMAPABLE. 2992 */ 2993 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN 2994 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL) 2995 var->linkage = BTF_VAR_STATIC; 2996 } 2997 2998 sort_vars: 2999 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); 3000 return 0; 3001 } 3002 3003 static int bpf_object_fixup_btf(struct bpf_object *obj) 3004 { 3005 int i, n, err = 0; 3006 3007 if (!obj->btf) 3008 return 0; 3009 3010 n = btf__type_cnt(obj->btf); 3011 for (i = 1; i < n; i++) { 3012 struct btf_type *t = btf_type_by_id(obj->btf, i); 3013 3014 /* Loader needs to fix up some of the things compiler 3015 * couldn't get its hands on while emitting BTF. This 3016 * is section size and global variable offset. We use 3017 * the info from the ELF itself for this purpose. 
3018 */ 3019 if (btf_is_datasec(t)) { 3020 err = btf_fixup_datasec(obj, obj->btf, t); 3021 if (err) 3022 return err; 3023 } 3024 } 3025 3026 return 0; 3027 } 3028 3029 static bool prog_needs_vmlinux_btf(struct bpf_program *prog) 3030 { 3031 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || 3032 prog->type == BPF_PROG_TYPE_LSM) 3033 return true; 3034 3035 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs 3036 * also need vmlinux BTF 3037 */ 3038 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) 3039 return true; 3040 3041 return false; 3042 } 3043 3044 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) 3045 { 3046 struct bpf_program *prog; 3047 int i; 3048 3049 /* CO-RE relocations need kernel BTF, only when btf_custom_path 3050 * is not specified 3051 */ 3052 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path) 3053 return true; 3054 3055 /* Support for typed ksyms needs kernel BTF */ 3056 for (i = 0; i < obj->nr_extern; i++) { 3057 const struct extern_desc *ext; 3058 3059 ext = &obj->externs[i]; 3060 if (ext->type == EXT_KSYM && ext->ksym.type_id) 3061 return true; 3062 } 3063 3064 bpf_object__for_each_program(prog, obj) { 3065 if (!prog->autoload) 3066 continue; 3067 if (prog_needs_vmlinux_btf(prog)) 3068 return true; 3069 } 3070 3071 return false; 3072 } 3073 3074 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) 3075 { 3076 int err; 3077 3078 /* btf_vmlinux could be loaded earlier */ 3079 if (obj->btf_vmlinux || obj->gen_loader) 3080 return 0; 3081 3082 if (!force && !obj_needs_vmlinux_btf(obj)) 3083 return 0; 3084 3085 obj->btf_vmlinux = btf__load_vmlinux_btf(); 3086 err = libbpf_get_error(obj->btf_vmlinux); 3087 if (err) { 3088 pr_warn("Error loading vmlinux BTF: %d\n", err); 3089 obj->btf_vmlinux = NULL; 3090 return err; 3091 } 3092 return 0; 3093 } 3094 3095 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) 3096 { 3097 struct btf *kern_btf = obj->btf; 3098 bool btf_mandatory, sanitize; 3099 int i, err = 0; 3100 3101 if (!obj->btf) 3102 return 0; 3103 3104 if (!kernel_supports(obj, FEAT_BTF)) { 3105 if (kernel_needs_btf(obj)) { 3106 err = -EOPNOTSUPP; 3107 goto report; 3108 } 3109 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); 3110 return 0; 3111 } 3112 3113 /* Even though some subprogs are global/weak, user might prefer more 3114 * permissive BPF verification process that BPF verifier performs for 3115 * static functions, taking into account more context from the caller 3116 * functions. In such case, they need to mark such subprogs with 3117 * __attribute__((visibility("hidden"))) and libbpf will adjust 3118 * corresponding FUNC BTF type to be marked as static and trigger more 3119 * involved BPF verification process. 
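 * Example (illustrative): a subprog defined in BPF C code as
 *
 *	__attribute__((visibility("hidden")))
 *	int do_work(void *ctx) { ... }
 *
 * stays a global symbol in the ELF, but the loop below flips its FUNC
 * BTF linkage to BTF_FUNC_STATIC ("do_work" is a made-up name).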
3120 */ 3121 for (i = 0; i < obj->nr_programs; i++) { 3122 struct bpf_program *prog = &obj->programs[i]; 3123 struct btf_type *t; 3124 const char *name; 3125 int j, n; 3126 3127 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) 3128 continue; 3129 3130 n = btf__type_cnt(obj->btf); 3131 for (j = 1; j < n; j++) { 3132 t = btf_type_by_id(obj->btf, j); 3133 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) 3134 continue; 3135 3136 name = btf__str_by_offset(obj->btf, t->name_off); 3137 if (strcmp(name, prog->name) != 0) 3138 continue; 3139 3140 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0); 3141 break; 3142 } 3143 } 3144 3145 sanitize = btf_needs_sanitization(obj); 3146 if (sanitize) { 3147 const void *raw_data; 3148 __u32 sz; 3149 3150 /* clone BTF to sanitize a copy and leave the original intact */ 3151 raw_data = btf__raw_data(obj->btf, &sz); 3152 kern_btf = btf__new(raw_data, sz); 3153 err = libbpf_get_error(kern_btf); 3154 if (err) 3155 return err; 3156 3157 /* enforce 8-byte pointers for BPF-targeted BTFs */ 3158 btf__set_pointer_size(obj->btf, 8); 3159 err = bpf_object__sanitize_btf(obj, kern_btf); 3160 if (err) 3161 return err; 3162 } 3163 3164 if (obj->gen_loader) { 3165 __u32 raw_size = 0; 3166 const void *raw_data = btf__raw_data(kern_btf, &raw_size); 3167 3168 if (!raw_data) 3169 return -ENOMEM; 3170 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size); 3171 /* Pretend to have valid FD to pass various fd >= 0 checks. 3172 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. 3173 */ 3174 btf__set_fd(kern_btf, 0); 3175 } else { 3176 /* currently BPF_BTF_LOAD only supports log_level 1 */ 3177 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size, 3178 obj->log_level ? 1 : 0); 3179 } 3180 if (sanitize) { 3181 if (!err) { 3182 /* move fd to libbpf's BTF */ 3183 btf__set_fd(obj->btf, btf__fd(kern_btf)); 3184 btf__set_fd(kern_btf, -1); 3185 } 3186 btf__free(kern_btf); 3187 } 3188 report: 3189 if (err) { 3190 btf_mandatory = kernel_needs_btf(obj); 3191 pr_warn("Error loading .BTF into kernel: %d. %s\n", err, 3192 btf_mandatory ? "BTF is mandatory, can't proceed." 
3193 : "BTF is optional, ignoring."); 3194 if (!btf_mandatory) 3195 err = 0; 3196 } 3197 return err; 3198 } 3199 3200 static const char *elf_sym_str(const struct bpf_object *obj, size_t off) 3201 { 3202 const char *name; 3203 3204 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); 3205 if (!name) { 3206 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", 3207 off, obj->path, elf_errmsg(-1)); 3208 return NULL; 3209 } 3210 3211 return name; 3212 } 3213 3214 static const char *elf_sec_str(const struct bpf_object *obj, size_t off) 3215 { 3216 const char *name; 3217 3218 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); 3219 if (!name) { 3220 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", 3221 off, obj->path, elf_errmsg(-1)); 3222 return NULL; 3223 } 3224 3225 return name; 3226 } 3227 3228 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) 3229 { 3230 Elf_Scn *scn; 3231 3232 scn = elf_getscn(obj->efile.elf, idx); 3233 if (!scn) { 3234 pr_warn("elf: failed to get section(%zu) from %s: %s\n", 3235 idx, obj->path, elf_errmsg(-1)); 3236 return NULL; 3237 } 3238 return scn; 3239 } 3240 3241 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) 3242 { 3243 Elf_Scn *scn = NULL; 3244 Elf *elf = obj->efile.elf; 3245 const char *sec_name; 3246 3247 while ((scn = elf_nextscn(elf, scn)) != NULL) { 3248 sec_name = elf_sec_name(obj, scn); 3249 if (!sec_name) 3250 return NULL; 3251 3252 if (strcmp(sec_name, name) != 0) 3253 continue; 3254 3255 return scn; 3256 } 3257 return NULL; 3258 } 3259 3260 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) 3261 { 3262 Elf64_Shdr *shdr; 3263 3264 if (!scn) 3265 return NULL; 3266 3267 shdr = elf64_getshdr(scn); 3268 if (!shdr) { 3269 pr_warn("elf: failed to get section(%zu) header from %s: %s\n", 3270 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); 3271 return NULL; 3272 } 3273 3274 return shdr; 3275 } 3276 3277 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) 3278 { 3279 const char *name; 3280 Elf64_Shdr *sh; 3281 3282 if (!scn) 3283 return NULL; 3284 3285 sh = elf_sec_hdr(obj, scn); 3286 if (!sh) 3287 return NULL; 3288 3289 name = elf_sec_str(obj, sh->sh_name); 3290 if (!name) { 3291 pr_warn("elf: failed to get section(%zu) name from %s: %s\n", 3292 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); 3293 return NULL; 3294 } 3295 3296 return name; 3297 } 3298 3299 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) 3300 { 3301 Elf_Data *data; 3302 3303 if (!scn) 3304 return NULL; 3305 3306 data = elf_getdata(scn, 0); 3307 if (!data) { 3308 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", 3309 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", 3310 obj->path, elf_errmsg(-1)); 3311 return NULL; 3312 } 3313 3314 return data; 3315 } 3316 3317 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) 3318 { 3319 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) 3320 return NULL; 3321 3322 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; 3323 } 3324 3325 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) 3326 { 3327 if (idx >= data->d_size / sizeof(Elf64_Rel)) 3328 return NULL; 3329 3330 return (Elf64_Rel *)data->d_buf + idx; 3331 } 3332 3333 static bool is_sec_name_dwarf(const char *name) 3334 { 3335 /* approximation, but the actual list is too long */ 3336 return str_has_pfx(name, ".debug_"); 3337 } 3338 3339 static bool 
ignore_elf_section(Elf64_Shdr *hdr, const char *name) 3340 { 3341 /* no special handling of .strtab */ 3342 if (hdr->sh_type == SHT_STRTAB) 3343 return true; 3344 3345 /* ignore .llvm_addrsig section as well */ 3346 if (hdr->sh_type == SHT_LLVM_ADDRSIG) 3347 return true; 3348 3349 /* no subprograms will lead to an empty .text section, ignore it */ 3350 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && 3351 strcmp(name, ".text") == 0) 3352 return true; 3353 3354 /* DWARF sections */ 3355 if (is_sec_name_dwarf(name)) 3356 return true; 3357 3358 if (str_has_pfx(name, ".rel")) { 3359 name += sizeof(".rel") - 1; 3360 /* DWARF section relocations */ 3361 if (is_sec_name_dwarf(name)) 3362 return true; 3363 3364 /* .BTF and .BTF.ext don't need relocations */ 3365 if (strcmp(name, BTF_ELF_SEC) == 0 || 3366 strcmp(name, BTF_EXT_ELF_SEC) == 0) 3367 return true; 3368 } 3369 3370 return false; 3371 } 3372 3373 static int cmp_progs(const void *_a, const void *_b) 3374 { 3375 const struct bpf_program *a = _a; 3376 const struct bpf_program *b = _b; 3377 3378 if (a->sec_idx != b->sec_idx) 3379 return a->sec_idx < b->sec_idx ? -1 : 1; 3380 3381 /* sec_insn_off can't be the same within the section */ 3382 return a->sec_insn_off < b->sec_insn_off ? -1 : 1; 3383 } 3384 3385 static int bpf_object__elf_collect(struct bpf_object *obj) 3386 { 3387 struct elf_sec_desc *sec_desc; 3388 Elf *elf = obj->efile.elf; 3389 Elf_Data *btf_ext_data = NULL; 3390 Elf_Data *btf_data = NULL; 3391 int idx = 0, err = 0; 3392 const char *name; 3393 Elf_Data *data; 3394 Elf_Scn *scn; 3395 Elf64_Shdr *sh; 3396 3397 /* ELF section indices are 0-based, but sec #0 is special "invalid" 3398 * section. Since section count retrieved by elf_getshdrnum() does 3399 * include sec #0, it is already the necessary size of an array to keep 3400 * all the sections. 
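 * (E.g., an ELF whose highest section index is 9 reports a count of 10
 * from elf_getshdrnum(), which is exactly the array size the calloc()
 * below needs when sections are addressed by index.)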
3401 */ 3402 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) { 3403 pr_warn("elf: failed to get the number of sections for %s: %s\n", 3404 obj->path, elf_errmsg(-1)); 3405 return -LIBBPF_ERRNO__FORMAT; 3406 } 3407 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); 3408 if (!obj->efile.secs) 3409 return -ENOMEM; 3410 3411 /* a bunch of ELF parsing functionality depends on processing symbols, 3412 * so do the first pass and find the symbol table 3413 */ 3414 scn = NULL; 3415 while ((scn = elf_nextscn(elf, scn)) != NULL) { 3416 sh = elf_sec_hdr(obj, scn); 3417 if (!sh) 3418 return -LIBBPF_ERRNO__FORMAT; 3419 3420 if (sh->sh_type == SHT_SYMTAB) { 3421 if (obj->efile.symbols) { 3422 pr_warn("elf: multiple symbol tables in %s\n", obj->path); 3423 return -LIBBPF_ERRNO__FORMAT; 3424 } 3425 3426 data = elf_sec_data(obj, scn); 3427 if (!data) 3428 return -LIBBPF_ERRNO__FORMAT; 3429 3430 idx = elf_ndxscn(scn); 3431 3432 obj->efile.symbols = data; 3433 obj->efile.symbols_shndx = idx; 3434 obj->efile.strtabidx = sh->sh_link; 3435 } 3436 } 3437 3438 if (!obj->efile.symbols) { 3439 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", 3440 obj->path); 3441 return -ENOENT; 3442 } 3443 3444 scn = NULL; 3445 while ((scn = elf_nextscn(elf, scn)) != NULL) { 3446 idx = elf_ndxscn(scn); 3447 sec_desc = &obj->efile.secs[idx]; 3448 3449 sh = elf_sec_hdr(obj, scn); 3450 if (!sh) 3451 return -LIBBPF_ERRNO__FORMAT; 3452 3453 name = elf_sec_str(obj, sh->sh_name); 3454 if (!name) 3455 return -LIBBPF_ERRNO__FORMAT; 3456 3457 if (ignore_elf_section(sh, name)) 3458 continue; 3459 3460 data = elf_sec_data(obj, scn); 3461 if (!data) 3462 return -LIBBPF_ERRNO__FORMAT; 3463 3464 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", 3465 idx, name, (unsigned long)data->d_size, 3466 (int)sh->sh_link, (unsigned long)sh->sh_flags, 3467 (int)sh->sh_type); 3468 3469 if (strcmp(name, "license") == 0) { 3470 err = bpf_object__init_license(obj, data->d_buf, data->d_size); 3471 if (err) 3472 return err; 3473 } else if (strcmp(name, "version") == 0) { 3474 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); 3475 if (err) 3476 return err; 3477 } else if (strcmp(name, "maps") == 0) { 3478 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n"); 3479 return -ENOTSUP; 3480 } else if (strcmp(name, MAPS_ELF_SEC) == 0) { 3481 obj->efile.btf_maps_shndx = idx; 3482 } else if (strcmp(name, BTF_ELF_SEC) == 0) { 3483 if (sh->sh_type != SHT_PROGBITS) 3484 return -LIBBPF_ERRNO__FORMAT; 3485 btf_data = data; 3486 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 3487 if (sh->sh_type != SHT_PROGBITS) 3488 return -LIBBPF_ERRNO__FORMAT; 3489 btf_ext_data = data; 3490 } else if (sh->sh_type == SHT_SYMTAB) { 3491 /* already processed during the first pass above */ 3492 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { 3493 if (sh->sh_flags & SHF_EXECINSTR) { 3494 if (strcmp(name, ".text") == 0) 3495 obj->efile.text_shndx = idx; 3496 err = bpf_object__add_programs(obj, data, name, idx); 3497 if (err) 3498 return err; 3499 } else if (strcmp(name, DATA_SEC) == 0 || 3500 str_has_pfx(name, DATA_SEC ".")) { 3501 sec_desc->sec_type = SEC_DATA; 3502 sec_desc->shdr = sh; 3503 sec_desc->data = data; 3504 } else if (strcmp(name, RODATA_SEC) == 0 || 3505 str_has_pfx(name, RODATA_SEC ".")) { 3506 sec_desc->sec_type = SEC_RODATA; 3507 sec_desc->shdr = sh; 3508 sec_desc->data = data; 3509 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) { 3510 
obj->efile.st_ops_data = data; 3511 obj->efile.st_ops_shndx = idx; 3512 } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) { 3513 obj->efile.st_ops_link_data = data; 3514 obj->efile.st_ops_link_shndx = idx; 3515 } else { 3516 pr_info("elf: skipping unrecognized data section(%d) %s\n", 3517 idx, name); 3518 } 3519 } else if (sh->sh_type == SHT_REL) { 3520 int targ_sec_idx = sh->sh_info; /* points to other section */ 3521 3522 if (sh->sh_entsize != sizeof(Elf64_Rel) || 3523 targ_sec_idx >= obj->efile.sec_cnt) 3524 return -LIBBPF_ERRNO__FORMAT; 3525 3526 /* Only do relo for section with exec instructions */ 3527 if (!section_have_execinstr(obj, targ_sec_idx) && 3528 strcmp(name, ".rel" STRUCT_OPS_SEC) && 3529 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) && 3530 strcmp(name, ".rel" MAPS_ELF_SEC)) { 3531 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", 3532 idx, name, targ_sec_idx, 3533 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>"); 3534 continue; 3535 } 3536 3537 sec_desc->sec_type = SEC_RELO; 3538 sec_desc->shdr = sh; 3539 sec_desc->data = data; 3540 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 || 3541 str_has_pfx(name, BSS_SEC "."))) { 3542 sec_desc->sec_type = SEC_BSS; 3543 sec_desc->shdr = sh; 3544 sec_desc->data = data; 3545 } else { 3546 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, 3547 (size_t)sh->sh_size); 3548 } 3549 } 3550 3551 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { 3552 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); 3553 return -LIBBPF_ERRNO__FORMAT; 3554 } 3555 3556 /* sort BPF programs by section name and in-section instruction offset 3557 * for faster search 3558 */ 3559 if (obj->nr_programs) 3560 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); 3561 3562 return bpf_object__init_btf(obj, btf_data, btf_ext_data); 3563 } 3564 3565 static bool sym_is_extern(const Elf64_Sym *sym) 3566 { 3567 int bind = ELF64_ST_BIND(sym->st_info); 3568 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ 3569 return sym->st_shndx == SHN_UNDEF && 3570 (bind == STB_GLOBAL || bind == STB_WEAK) && 3571 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; 3572 } 3573 3574 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) 3575 { 3576 int bind = ELF64_ST_BIND(sym->st_info); 3577 int type = ELF64_ST_TYPE(sym->st_info); 3578 3579 /* in .text section */ 3580 if (sym->st_shndx != text_shndx) 3581 return false; 3582 3583 /* local function */ 3584 if (bind == STB_LOCAL && type == STT_SECTION) 3585 return true; 3586 3587 /* global function */ 3588 return bind == STB_GLOBAL && type == STT_FUNC; 3589 } 3590 3591 static int find_extern_btf_id(const struct btf *btf, const char *ext_name) 3592 { 3593 const struct btf_type *t; 3594 const char *tname; 3595 int i, n; 3596 3597 if (!btf) 3598 return -ESRCH; 3599 3600 n = btf__type_cnt(btf); 3601 for (i = 1; i < n; i++) { 3602 t = btf__type_by_id(btf, i); 3603 3604 if (!btf_is_var(t) && !btf_is_func(t)) 3605 continue; 3606 3607 tname = btf__name_by_offset(btf, t->name_off); 3608 if (strcmp(tname, ext_name)) 3609 continue; 3610 3611 if (btf_is_var(t) && 3612 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) 3613 return -EINVAL; 3614 3615 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) 3616 return -EINVAL; 3617 3618 return i; 3619 } 3620 3621 return -ENOENT; 3622 } 3623 3624 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { 3625 const struct btf_var_secinfo *vs; 3626 const 
struct btf_type *t; 3627 int i, j, n; 3628 3629 if (!btf) 3630 return -ESRCH; 3631 3632 n = btf__type_cnt(btf); 3633 for (i = 1; i < n; i++) { 3634 t = btf__type_by_id(btf, i); 3635 3636 if (!btf_is_datasec(t)) 3637 continue; 3638 3639 vs = btf_var_secinfos(t); 3640 for (j = 0; j < btf_vlen(t); j++, vs++) { 3641 if (vs->type == ext_btf_id) 3642 return i; 3643 } 3644 } 3645 3646 return -ENOENT; 3647 } 3648 3649 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, 3650 bool *is_signed) 3651 { 3652 const struct btf_type *t; 3653 const char *name; 3654 3655 t = skip_mods_and_typedefs(btf, id, NULL); 3656 name = btf__name_by_offset(btf, t->name_off); 3657 3658 if (is_signed) 3659 *is_signed = false; 3660 switch (btf_kind(t)) { 3661 case BTF_KIND_INT: { 3662 int enc = btf_int_encoding(t); 3663 3664 if (enc & BTF_INT_BOOL) 3665 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; 3666 if (is_signed) 3667 *is_signed = enc & BTF_INT_SIGNED; 3668 if (t->size == 1) 3669 return KCFG_CHAR; 3670 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) 3671 return KCFG_UNKNOWN; 3672 return KCFG_INT; 3673 } 3674 case BTF_KIND_ENUM: 3675 if (t->size != 4) 3676 return KCFG_UNKNOWN; 3677 if (strcmp(name, "libbpf_tristate")) 3678 return KCFG_UNKNOWN; 3679 return KCFG_TRISTATE; 3680 case BTF_KIND_ENUM64: 3681 if (strcmp(name, "libbpf_tristate")) 3682 return KCFG_UNKNOWN; 3683 return KCFG_TRISTATE; 3684 case BTF_KIND_ARRAY: 3685 if (btf_array(t)->nelems == 0) 3686 return KCFG_UNKNOWN; 3687 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) 3688 return KCFG_UNKNOWN; 3689 return KCFG_CHAR_ARR; 3690 default: 3691 return KCFG_UNKNOWN; 3692 } 3693 } 3694 3695 static int cmp_externs(const void *_a, const void *_b) 3696 { 3697 const struct extern_desc *a = _a; 3698 const struct extern_desc *b = _b; 3699 3700 if (a->type != b->type) 3701 return a->type < b->type ? -1 : 1; 3702 3703 if (a->type == EXT_KCFG) { 3704 /* descending order by alignment requirements */ 3705 if (a->kcfg.align != b->kcfg.align) 3706 return a->kcfg.align > b->kcfg.align ? -1 : 1; 3707 /* ascending order by size, within same alignment class */ 3708 if (a->kcfg.sz != b->kcfg.sz) 3709 return a->kcfg.sz < b->kcfg.sz ? -1 : 1; 3710 } 3711 3712 /* resolve ties by name */ 3713 return strcmp(a->name, b->name); 3714 } 3715 3716 static int find_int_btf_id(const struct btf *btf) 3717 { 3718 const struct btf_type *t; 3719 int i, n; 3720 3721 n = btf__type_cnt(btf); 3722 for (i = 1; i < n; i++) { 3723 t = btf__type_by_id(btf, i); 3724 3725 if (btf_is_int(t) && btf_int_bits(t) == 32) 3726 return i; 3727 } 3728 3729 return 0; 3730 } 3731 3732 static int add_dummy_ksym_var(struct btf *btf) 3733 { 3734 int i, int_btf_id, sec_btf_id, dummy_var_btf_id; 3735 const struct btf_var_secinfo *vs; 3736 const struct btf_type *sec; 3737 3738 if (!btf) 3739 return 0; 3740 3741 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC, 3742 BTF_KIND_DATASEC); 3743 if (sec_btf_id < 0) 3744 return 0; 3745 3746 sec = btf__type_by_id(btf, sec_btf_id); 3747 vs = btf_var_secinfos(sec); 3748 for (i = 0; i < btf_vlen(sec); i++, vs++) { 3749 const struct btf_type *vt; 3750 3751 vt = btf__type_by_id(btf, vs->type); 3752 if (btf_is_func(vt)) 3753 break; 3754 } 3755 3756 /* No func in ksyms sec. No need to add dummy var. 
*/ 3757 if (i == btf_vlen(sec)) 3758 return 0; 3759 3760 int_btf_id = find_int_btf_id(btf); 3761 dummy_var_btf_id = btf__add_var(btf, 3762 "dummy_ksym", 3763 BTF_VAR_GLOBAL_ALLOCATED, 3764 int_btf_id); 3765 if (dummy_var_btf_id < 0) 3766 pr_warn("cannot create a dummy_ksym var\n"); 3767 3768 return dummy_var_btf_id; 3769 } 3770 3771 static int bpf_object__collect_externs(struct bpf_object *obj) 3772 { 3773 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; 3774 const struct btf_type *t; 3775 struct extern_desc *ext; 3776 int i, n, off, dummy_var_btf_id; 3777 const char *ext_name, *sec_name; 3778 size_t ext_essent_len; 3779 Elf_Scn *scn; 3780 Elf64_Shdr *sh; 3781 3782 if (!obj->efile.symbols) 3783 return 0; 3784 3785 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); 3786 sh = elf_sec_hdr(obj, scn); 3787 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) 3788 return -LIBBPF_ERRNO__FORMAT; 3789 3790 dummy_var_btf_id = add_dummy_ksym_var(obj->btf); 3791 if (dummy_var_btf_id < 0) 3792 return dummy_var_btf_id; 3793 3794 n = sh->sh_size / sh->sh_entsize; 3795 pr_debug("looking for externs among %d symbols...\n", n); 3796 3797 for (i = 0; i < n; i++) { 3798 Elf64_Sym *sym = elf_sym_by_idx(obj, i); 3799 3800 if (!sym) 3801 return -LIBBPF_ERRNO__FORMAT; 3802 if (!sym_is_extern(sym)) 3803 continue; 3804 ext_name = elf_sym_str(obj, sym->st_name); 3805 if (!ext_name || !ext_name[0]) 3806 continue; 3807 3808 ext = obj->externs; 3809 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); 3810 if (!ext) 3811 return -ENOMEM; 3812 obj->externs = ext; 3813 ext = &ext[obj->nr_extern]; 3814 memset(ext, 0, sizeof(*ext)); 3815 obj->nr_extern++; 3816 3817 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); 3818 if (ext->btf_id <= 0) { 3819 pr_warn("failed to find BTF for extern '%s': %d\n", 3820 ext_name, ext->btf_id); 3821 return ext->btf_id; 3822 } 3823 t = btf__type_by_id(obj->btf, ext->btf_id); 3824 ext->name = btf__name_by_offset(obj->btf, t->name_off); 3825 ext->sym_idx = i; 3826 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; 3827 3828 ext_essent_len = bpf_core_essential_name_len(ext->name); 3829 ext->essent_name = NULL; 3830 if (ext_essent_len != strlen(ext->name)) { 3831 ext->essent_name = strndup(ext->name, ext_essent_len); 3832 if (!ext->essent_name) 3833 return -ENOMEM; 3834 } 3835 3836 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); 3837 if (ext->sec_btf_id <= 0) { 3838 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", 3839 ext_name, ext->btf_id, ext->sec_btf_id); 3840 return ext->sec_btf_id; 3841 } 3842 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); 3843 sec_name = btf__name_by_offset(obj->btf, sec->name_off); 3844 3845 if (strcmp(sec_name, KCONFIG_SEC) == 0) { 3846 if (btf_is_func(t)) { 3847 pr_warn("extern function %s is unsupported under %s section\n", 3848 ext->name, KCONFIG_SEC); 3849 return -ENOTSUP; 3850 } 3851 kcfg_sec = sec; 3852 ext->type = EXT_KCFG; 3853 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); 3854 if (ext->kcfg.sz <= 0) { 3855 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", 3856 ext_name, ext->kcfg.sz); 3857 return ext->kcfg.sz; 3858 } 3859 ext->kcfg.align = btf__align_of(obj->btf, t->type); 3860 if (ext->kcfg.align <= 0) { 3861 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", 3862 ext_name, ext->kcfg.align); 3863 return -EINVAL; 3864 } 3865 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, 3866 &ext->kcfg.is_signed); 3867 if (ext->kcfg.type == KCFG_UNKNOWN) { 3868 
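/* e.g., an extern whose type find_kcfg_type() can't represent, such as
 * 'extern float CONFIG_X __kconfig;' (a made-up example), is rejected
 * here
 */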
pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name); 3869 return -ENOTSUP; 3870 } 3871 } else if (strcmp(sec_name, KSYMS_SEC) == 0) { 3872 ksym_sec = sec; 3873 ext->type = EXT_KSYM; 3874 skip_mods_and_typedefs(obj->btf, t->type, 3875 &ext->ksym.type_id); 3876 } else { 3877 pr_warn("unrecognized extern section '%s'\n", sec_name); 3878 return -ENOTSUP; 3879 } 3880 } 3881 pr_debug("collected %d externs total\n", obj->nr_extern); 3882 3883 if (!obj->nr_extern) 3884 return 0; 3885 3886 /* sort externs by type, for kcfg ones also by (align, size, name) */ 3887 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); 3888 3889 /* for .ksyms section, we need to turn all externs into allocated 3890 * variables in BTF to pass kernel verification; we do this by 3891 * pretending that each extern is a 8-byte variable 3892 */ 3893 if (ksym_sec) { 3894 /* find existing 4-byte integer type in BTF to use for fake 3895 * extern variables in DATASEC 3896 */ 3897 int int_btf_id = find_int_btf_id(obj->btf); 3898 /* For extern function, a dummy_var added earlier 3899 * will be used to replace the vs->type and 3900 * its name string will be used to refill 3901 * the missing param's name. 3902 */ 3903 const struct btf_type *dummy_var; 3904 3905 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id); 3906 for (i = 0; i < obj->nr_extern; i++) { 3907 ext = &obj->externs[i]; 3908 if (ext->type != EXT_KSYM) 3909 continue; 3910 pr_debug("extern (ksym) #%d: symbol %d, name %s\n", 3911 i, ext->sym_idx, ext->name); 3912 } 3913 3914 sec = ksym_sec; 3915 n = btf_vlen(sec); 3916 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { 3917 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; 3918 struct btf_type *vt; 3919 3920 vt = (void *)btf__type_by_id(obj->btf, vs->type); 3921 ext_name = btf__name_by_offset(obj->btf, vt->name_off); 3922 ext = find_extern_by_name(obj, ext_name); 3923 if (!ext) { 3924 pr_warn("failed to find extern definition for BTF %s '%s'\n", 3925 btf_kind_str(vt), ext_name); 3926 return -ESRCH; 3927 } 3928 if (btf_is_func(vt)) { 3929 const struct btf_type *func_proto; 3930 struct btf_param *param; 3931 int j; 3932 3933 func_proto = btf__type_by_id(obj->btf, 3934 vt->type); 3935 param = btf_params(func_proto); 3936 /* Reuse the dummy_var string if the 3937 * func proto does not have param name. 
3938 */ 3939 for (j = 0; j < btf_vlen(func_proto); j++) 3940 if (param[j].type && !param[j].name_off) 3941 param[j].name_off = 3942 dummy_var->name_off; 3943 vs->type = dummy_var_btf_id; 3944 vt->info &= ~0xffff; 3945 vt->info |= BTF_FUNC_GLOBAL; 3946 } else { 3947 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; 3948 vt->type = int_btf_id; 3949 } 3950 vs->offset = off; 3951 vs->size = sizeof(int); 3952 } 3953 sec->size = off; 3954 } 3955 3956 if (kcfg_sec) { 3957 sec = kcfg_sec; 3958 /* for kcfg externs calculate their offsets within a .kconfig map */ 3959 off = 0; 3960 for (i = 0; i < obj->nr_extern; i++) { 3961 ext = &obj->externs[i]; 3962 if (ext->type != EXT_KCFG) 3963 continue; 3964 3965 ext->kcfg.data_off = roundup(off, ext->kcfg.align); 3966 off = ext->kcfg.data_off + ext->kcfg.sz; 3967 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", 3968 i, ext->sym_idx, ext->kcfg.data_off, ext->name); 3969 } 3970 sec->size = off; 3971 n = btf_vlen(sec); 3972 for (i = 0; i < n; i++) { 3973 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; 3974 3975 t = btf__type_by_id(obj->btf, vs->type); 3976 ext_name = btf__name_by_offset(obj->btf, t->name_off); 3977 ext = find_extern_by_name(obj, ext_name); 3978 if (!ext) { 3979 pr_warn("failed to find extern definition for BTF var '%s'\n", 3980 ext_name); 3981 return -ESRCH; 3982 } 3983 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; 3984 vs->offset = ext->kcfg.data_off; 3985 } 3986 } 3987 return 0; 3988 } 3989 3990 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) 3991 { 3992 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; 3993 } 3994 3995 struct bpf_program * 3996 bpf_object__find_program_by_name(const struct bpf_object *obj, 3997 const char *name) 3998 { 3999 struct bpf_program *prog; 4000 4001 bpf_object__for_each_program(prog, obj) { 4002 if (prog_is_subprog(obj, prog)) 4003 continue; 4004 if (!strcmp(prog->name, name)) 4005 return prog; 4006 } 4007 return errno = ENOENT, NULL; 4008 } 4009 4010 static bool bpf_object__shndx_is_data(const struct bpf_object *obj, 4011 int shndx) 4012 { 4013 switch (obj->efile.secs[shndx].sec_type) { 4014 case SEC_BSS: 4015 case SEC_DATA: 4016 case SEC_RODATA: 4017 return true; 4018 default: 4019 return false; 4020 } 4021 } 4022 4023 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, 4024 int shndx) 4025 { 4026 return shndx == obj->efile.btf_maps_shndx; 4027 } 4028 4029 static enum libbpf_map_type 4030 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) 4031 { 4032 if (shndx == obj->efile.symbols_shndx) 4033 return LIBBPF_MAP_KCONFIG; 4034 4035 switch (obj->efile.secs[shndx].sec_type) { 4036 case SEC_BSS: 4037 return LIBBPF_MAP_BSS; 4038 case SEC_DATA: 4039 return LIBBPF_MAP_DATA; 4040 case SEC_RODATA: 4041 return LIBBPF_MAP_RODATA; 4042 default: 4043 return LIBBPF_MAP_UNSPEC; 4044 } 4045 } 4046 4047 static int bpf_program__record_reloc(struct bpf_program *prog, 4048 struct reloc_desc *reloc_desc, 4049 __u32 insn_idx, const char *sym_name, 4050 const Elf64_Sym *sym, const Elf64_Rel *rel) 4051 { 4052 struct bpf_insn *insn = &prog->insns[insn_idx]; 4053 size_t map_idx, nr_maps = prog->obj->nr_maps; 4054 struct bpf_object *obj = prog->obj; 4055 __u32 shdr_idx = sym->st_shndx; 4056 enum libbpf_map_type type; 4057 const char *sym_sec_name; 4058 struct bpf_map *map; 4059 4060 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) { 4061 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", 
4062 prog->name, sym_name, insn_idx, insn->code); 4063 return -LIBBPF_ERRNO__RELOC; 4064 } 4065 4066 if (sym_is_extern(sym)) { 4067 int sym_idx = ELF64_R_SYM(rel->r_info); 4068 int i, n = obj->nr_extern; 4069 struct extern_desc *ext; 4070 4071 for (i = 0; i < n; i++) { 4072 ext = &obj->externs[i]; 4073 if (ext->sym_idx == sym_idx) 4074 break; 4075 } 4076 if (i >= n) { 4077 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", 4078 prog->name, sym_name, sym_idx); 4079 return -LIBBPF_ERRNO__RELOC; 4080 } 4081 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", 4082 prog->name, i, ext->name, ext->sym_idx, insn_idx); 4083 if (insn->code == (BPF_JMP | BPF_CALL)) 4084 reloc_desc->type = RELO_EXTERN_CALL; 4085 else 4086 reloc_desc->type = RELO_EXTERN_LD64; 4087 reloc_desc->insn_idx = insn_idx; 4088 reloc_desc->ext_idx = i; 4089 return 0; 4090 } 4091 4092 /* sub-program call relocation */ 4093 if (is_call_insn(insn)) { 4094 if (insn->src_reg != BPF_PSEUDO_CALL) { 4095 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); 4096 return -LIBBPF_ERRNO__RELOC; 4097 } 4098 /* text_shndx can be 0, if no default "main" program exists */ 4099 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { 4100 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); 4101 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", 4102 prog->name, sym_name, sym_sec_name); 4103 return -LIBBPF_ERRNO__RELOC; 4104 } 4105 if (sym->st_value % BPF_INSN_SZ) { 4106 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", 4107 prog->name, sym_name, (size_t)sym->st_value); 4108 return -LIBBPF_ERRNO__RELOC; 4109 } 4110 reloc_desc->type = RELO_CALL; 4111 reloc_desc->insn_idx = insn_idx; 4112 reloc_desc->sym_off = sym->st_value; 4113 return 0; 4114 } 4115 4116 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { 4117 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", 4118 prog->name, sym_name, shdr_idx); 4119 return -LIBBPF_ERRNO__RELOC; 4120 } 4121 4122 /* loading subprog addresses */ 4123 if (sym_is_subprog(sym, obj->efile.text_shndx)) { 4124 /* global_func: sym->st_value = offset in the section, insn->imm = 0. 4125 * local_func: sym->st_value = 0, insn->imm = offset in the section. 
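	 * Either way, the resulting offset must be a multiple of
	 * BPF_INSN_SZ; both parts are validated below.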
4126 */ 4127 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { 4128 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", 4129 prog->name, sym_name, (size_t)sym->st_value, insn->imm); 4130 return -LIBBPF_ERRNO__RELOC; 4131 } 4132 4133 reloc_desc->type = RELO_SUBPROG_ADDR; 4134 reloc_desc->insn_idx = insn_idx; 4135 reloc_desc->sym_off = sym->st_value; 4136 return 0; 4137 } 4138 4139 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); 4140 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); 4141 4142 /* generic map reference relocation */ 4143 if (type == LIBBPF_MAP_UNSPEC) { 4144 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { 4145 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", 4146 prog->name, sym_name, sym_sec_name); 4147 return -LIBBPF_ERRNO__RELOC; 4148 } 4149 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 4150 map = &obj->maps[map_idx]; 4151 if (map->libbpf_type != type || 4152 map->sec_idx != sym->st_shndx || 4153 map->sec_offset != sym->st_value) 4154 continue; 4155 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", 4156 prog->name, map_idx, map->name, map->sec_idx, 4157 map->sec_offset, insn_idx); 4158 break; 4159 } 4160 if (map_idx >= nr_maps) { 4161 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", 4162 prog->name, sym_sec_name, (size_t)sym->st_value); 4163 return -LIBBPF_ERRNO__RELOC; 4164 } 4165 reloc_desc->type = RELO_LD64; 4166 reloc_desc->insn_idx = insn_idx; 4167 reloc_desc->map_idx = map_idx; 4168 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ 4169 return 0; 4170 } 4171 4172 /* global data map relocation */ 4173 if (!bpf_object__shndx_is_data(obj, shdr_idx)) { 4174 pr_warn("prog '%s': bad data relo against section '%s'\n", 4175 prog->name, sym_sec_name); 4176 return -LIBBPF_ERRNO__RELOC; 4177 } 4178 for (map_idx = 0; map_idx < nr_maps; map_idx++) { 4179 map = &obj->maps[map_idx]; 4180 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) 4181 continue; 4182 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", 4183 prog->name, map_idx, map->name, map->sec_idx, 4184 map->sec_offset, insn_idx); 4185 break; 4186 } 4187 if (map_idx >= nr_maps) { 4188 pr_warn("prog '%s': data relo failed to find map for section '%s'\n", 4189 prog->name, sym_sec_name); 4190 return -LIBBPF_ERRNO__RELOC; 4191 } 4192 4193 reloc_desc->type = RELO_DATA; 4194 reloc_desc->insn_idx = insn_idx; 4195 reloc_desc->map_idx = map_idx; 4196 reloc_desc->sym_off = sym->st_value; 4197 return 0; 4198 } 4199 4200 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) 4201 { 4202 return insn_idx >= prog->sec_insn_off && 4203 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; 4204 } 4205 4206 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, 4207 size_t sec_idx, size_t insn_idx) 4208 { 4209 int l = 0, r = obj->nr_programs - 1, m; 4210 struct bpf_program *prog; 4211 4212 if (!obj->nr_programs) 4213 return NULL; 4214 4215 while (l < r) { 4216 m = l + (r - l + 1) / 2; 4217 prog = &obj->programs[m]; 4218 4219 if (prog->sec_idx < sec_idx || 4220 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) 4221 l = m; 4222 else 4223 r = m - 1; 4224 } 4225 /* matching program could be at index l, but it still might be the 4226 * wrong one, so we need to double check conditions for the last time 4227 */ 4228 prog = &obj->programs[l]; 4229 if (prog->sec_idx == sec_idx && 
prog_contains_insn(prog, insn_idx)) 4230 return prog; 4231 return NULL; 4232 } 4233 4234 static int 4235 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) 4236 { 4237 const char *relo_sec_name, *sec_name; 4238 size_t sec_idx = shdr->sh_info, sym_idx; 4239 struct bpf_program *prog; 4240 struct reloc_desc *relos; 4241 int err, i, nrels; 4242 const char *sym_name; 4243 __u32 insn_idx; 4244 Elf_Scn *scn; 4245 Elf_Data *scn_data; 4246 Elf64_Sym *sym; 4247 Elf64_Rel *rel; 4248 4249 if (sec_idx >= obj->efile.sec_cnt) 4250 return -EINVAL; 4251 4252 scn = elf_sec_by_idx(obj, sec_idx); 4253 scn_data = elf_sec_data(obj, scn); 4254 if (!scn_data) 4255 return -LIBBPF_ERRNO__FORMAT; 4256 4257 relo_sec_name = elf_sec_str(obj, shdr->sh_name); 4258 sec_name = elf_sec_name(obj, scn); 4259 if (!relo_sec_name || !sec_name) 4260 return -EINVAL; 4261 4262 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", 4263 relo_sec_name, sec_idx, sec_name); 4264 nrels = shdr->sh_size / shdr->sh_entsize; 4265 4266 for (i = 0; i < nrels; i++) { 4267 rel = elf_rel_by_idx(data, i); 4268 if (!rel) { 4269 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); 4270 return -LIBBPF_ERRNO__FORMAT; 4271 } 4272 4273 sym_idx = ELF64_R_SYM(rel->r_info); 4274 sym = elf_sym_by_idx(obj, sym_idx); 4275 if (!sym) { 4276 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", 4277 relo_sec_name, sym_idx, i); 4278 return -LIBBPF_ERRNO__FORMAT; 4279 } 4280 4281 if (sym->st_shndx >= obj->efile.sec_cnt) { 4282 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", 4283 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); 4284 return -LIBBPF_ERRNO__FORMAT; 4285 } 4286 4287 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { 4288 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", 4289 relo_sec_name, (size_t)rel->r_offset, i); 4290 return -LIBBPF_ERRNO__FORMAT; 4291 } 4292 4293 insn_idx = rel->r_offset / BPF_INSN_SZ; 4294 /* relocations against static functions are recorded as 4295 * relocations against the section that contains a function; 4296 * in such case, symbol will be STT_SECTION and sym.st_name 4297 * will point to empty string (0), so fetch section name 4298 * instead 4299 */ 4300 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) 4301 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); 4302 else 4303 sym_name = elf_sym_str(obj, sym->st_name); 4304 sym_name = sym_name ?: "<?"; 4305 4306 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", 4307 relo_sec_name, i, insn_idx, sym_name); 4308 4309 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); 4310 if (!prog) { 4311 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", 4312 relo_sec_name, i, sec_name, insn_idx); 4313 continue; 4314 } 4315 4316 relos = libbpf_reallocarray(prog->reloc_desc, 4317 prog->nr_reloc + 1, sizeof(*relos)); 4318 if (!relos) 4319 return -ENOMEM; 4320 prog->reloc_desc = relos; 4321 4322 /* adjust insn_idx to local BPF program frame of reference */ 4323 insn_idx -= prog->sec_insn_off; 4324 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], 4325 insn_idx, sym_name, sym, rel); 4326 if (err) 4327 return err; 4328 4329 prog->nr_reloc++; 4330 } 4331 return 0; 4332 } 4333 4334 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map) 4335 { 4336 int id; 4337 4338 if (!obj->btf) 4339 return -ENOENT; 4340 
	/* if it's a BTF-defined map, we don't need to search for type IDs.
	 * A struct_ops map does not need btf_key_type_id and
	 * btf_value_type_id either.
	 */
	if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
		return 0;

	/*
	 * LLVM annotates global data differently in BTF, that is,
	 * only as '.data', '.bss' or '.rodata'.
	 */
	if (!bpf_map__is_internal(map))
		return -ENOENT;

	id = btf__find_by_name(obj->btf, map->real_name);
	if (id < 0)
		return id;

	map->btf_key_type_id = 0;
	map->btf_value_type_id = id;
	return 0;
}

static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
{
	char file[PATH_MAX], buff[4096];
	FILE *fp;
	__u32 val;
	int err;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
	memset(info, 0, sizeof(*info));

	fp = fopen(file, "re");
	if (!fp) {
		err = -errno;
		pr_warn("failed to open %s: %d. No procfs support?\n", file,
			err);
		return err;
	}

	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			info->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			info->key_size = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			info->value_size = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			info->max_entries = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			info->map_flags = val;
	}

	fclose(fp);

	return 0;
}

bool bpf_map__autocreate(const struct bpf_map *map)
{
	return map->autocreate;
}

int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
{
	if (map->obj->loaded)
		return libbpf_err(-EBUSY);

	map->autocreate = autocreate;
	return 0;
}

int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info), name_len;
	int new_fd, err;
	char *new_name;

	memset(&info, 0, len);
	err = bpf_map_get_info_by_fd(fd, &info, &len);
	if (err && errno == EINVAL)
		err = bpf_get_map_info_from_fdinfo(fd, &info);
	if (err)
		return libbpf_err(err);

	name_len = strlen(info.name);
	if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
		new_name = strdup(map->name);
	else
		new_name = strdup(info.name);

	if (!new_name)
		return libbpf_err(-errno);

	/*
	 * Like dup(), but make sure the new FD is >= 3 and has O_CLOEXEC set.
	 * This is similar to what we do in ensure_good_fd(), but without
	 * closing the original FD.
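	 * F_DUPFD_CLOEXEC duplicates the FD and sets the close-on-exec
	 * flag atomically, avoiding a separate fcntl(F_SETFD) call.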
4441 */ 4442 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); 4443 if (new_fd < 0) { 4444 err = -errno; 4445 goto err_free_new_name; 4446 } 4447 4448 err = zclose(map->fd); 4449 if (err) { 4450 err = -errno; 4451 goto err_close_new_fd; 4452 } 4453 free(map->name); 4454 4455 map->fd = new_fd; 4456 map->name = new_name; 4457 map->def.type = info.type; 4458 map->def.key_size = info.key_size; 4459 map->def.value_size = info.value_size; 4460 map->def.max_entries = info.max_entries; 4461 map->def.map_flags = info.map_flags; 4462 map->btf_key_type_id = info.btf_key_type_id; 4463 map->btf_value_type_id = info.btf_value_type_id; 4464 map->reused = true; 4465 map->map_extra = info.map_extra; 4466 4467 return 0; 4468 4469 err_close_new_fd: 4470 close(new_fd); 4471 err_free_new_name: 4472 free(new_name); 4473 return libbpf_err(err); 4474 } 4475 4476 __u32 bpf_map__max_entries(const struct bpf_map *map) 4477 { 4478 return map->def.max_entries; 4479 } 4480 4481 struct bpf_map *bpf_map__inner_map(struct bpf_map *map) 4482 { 4483 if (!bpf_map_type__is_map_in_map(map->def.type)) 4484 return errno = EINVAL, NULL; 4485 4486 return map->inner_map; 4487 } 4488 4489 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) 4490 { 4491 if (map->obj->loaded) 4492 return libbpf_err(-EBUSY); 4493 4494 map->def.max_entries = max_entries; 4495 4496 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */ 4497 if (map_is_ringbuf(map)) 4498 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries); 4499 4500 return 0; 4501 } 4502 4503 static int 4504 bpf_object__probe_loading(struct bpf_object *obj) 4505 { 4506 char *cp, errmsg[STRERR_BUFSIZE]; 4507 struct bpf_insn insns[] = { 4508 BPF_MOV64_IMM(BPF_REG_0, 0), 4509 BPF_EXIT_INSN(), 4510 }; 4511 int ret, insn_cnt = ARRAY_SIZE(insns); 4512 4513 if (obj->gen_loader) 4514 return 0; 4515 4516 ret = bump_rlimit_memlock(); 4517 if (ret) 4518 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret); 4519 4520 /* make sure basic loading works */ 4521 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); 4522 if (ret < 0) 4523 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL); 4524 if (ret < 0) { 4525 ret = errno; 4526 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 4527 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " 4528 "program. 
Make sure your kernel supports BPF "
			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
			"set to a big enough value.\n", __func__, cp, ret);
		return -ret;
	}
	close(ret);

	return 0;
}

static int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

static int probe_kern_prog_name(void)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

static int probe_kern_global_data(void)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
	close(map);
	return probe_fd(ret);
}

static int probe_kern_btf(void)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_func(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */				/* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */					/* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_func_global(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* global function: void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */				/* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */			/* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}

static int probe_kern_btf_datasec(void)
{
	static const char strs[] = "\0x\0.data";
	/* static int x; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x
*/ /* [2] */ 4650 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), 4651 BTF_VAR_STATIC, 4652 /* DATASEC val */ /* [3] */ 4653 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), 4654 BTF_VAR_SECINFO_ENC(2, 0, 4), 4655 }; 4656 4657 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 4658 strs, sizeof(strs))); 4659 } 4660 4661 static int probe_kern_btf_float(void) 4662 { 4663 static const char strs[] = "\0float"; 4664 __u32 types[] = { 4665 /* float */ 4666 BTF_TYPE_FLOAT_ENC(1, 4), 4667 }; 4668 4669 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 4670 strs, sizeof(strs))); 4671 } 4672 4673 static int probe_kern_btf_decl_tag(void) 4674 { 4675 static const char strs[] = "\0tag"; 4676 __u32 types[] = { 4677 /* int */ 4678 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 4679 /* VAR x */ /* [2] */ 4680 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), 4681 BTF_VAR_STATIC, 4682 /* attr */ 4683 BTF_TYPE_DECL_TAG_ENC(1, 2, -1), 4684 }; 4685 4686 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 4687 strs, sizeof(strs))); 4688 } 4689 4690 static int probe_kern_btf_type_tag(void) 4691 { 4692 static const char strs[] = "\0tag"; 4693 __u32 types[] = { 4694 /* int */ 4695 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 4696 /* attr */ 4697 BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */ 4698 /* ptr */ 4699 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */ 4700 }; 4701 4702 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 4703 strs, sizeof(strs))); 4704 } 4705 4706 static int probe_kern_array_mmap(void) 4707 { 4708 LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE); 4709 int fd; 4710 4711 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts); 4712 return probe_fd(fd); 4713 } 4714 4715 static int probe_kern_exp_attach_type(void) 4716 { 4717 LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE); 4718 struct bpf_insn insns[] = { 4719 BPF_MOV64_IMM(BPF_REG_0, 0), 4720 BPF_EXIT_INSN(), 4721 }; 4722 int fd, insn_cnt = ARRAY_SIZE(insns); 4723 4724 /* use any valid combination of program type and (optional) 4725 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) 4726 * to see if kernel supports expected_attach_type field for 4727 * BPF_PROG_LOAD command 4728 */ 4729 fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts); 4730 return probe_fd(fd); 4731 } 4732 4733 static int probe_kern_probe_read_kernel(void) 4734 { 4735 struct bpf_insn insns[] = { 4736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ 4737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ 4738 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ 4739 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ 4740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), 4741 BPF_EXIT_INSN(), 4742 }; 4743 int fd, insn_cnt = ARRAY_SIZE(insns); 4744 4745 fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL); 4746 return probe_fd(fd); 4747 } 4748 4749 static int probe_prog_bind_map(void) 4750 { 4751 char *cp, errmsg[STRERR_BUFSIZE]; 4752 struct bpf_insn insns[] = { 4753 BPF_MOV64_IMM(BPF_REG_0, 0), 4754 BPF_EXIT_INSN(), 4755 }; 4756 int ret, map, prog, insn_cnt = ARRAY_SIZE(insns); 4757 4758 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL); 4759 if (map < 0) { 4760 ret = -errno; 4761 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 4762 
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", 4763 __func__, cp, -ret); 4764 return ret; 4765 } 4766 4767 prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); 4768 if (prog < 0) { 4769 close(map); 4770 return 0; 4771 } 4772 4773 ret = bpf_prog_bind_map(prog, map, NULL); 4774 4775 close(map); 4776 close(prog); 4777 4778 return ret >= 0; 4779 } 4780 4781 static int probe_module_btf(void) 4782 { 4783 static const char strs[] = "\0int"; 4784 __u32 types[] = { 4785 /* int */ 4786 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), 4787 }; 4788 struct bpf_btf_info info; 4789 __u32 len = sizeof(info); 4790 char name[16]; 4791 int fd, err; 4792 4793 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs)); 4794 if (fd < 0) 4795 return 0; /* BTF not supported at all */ 4796 4797 memset(&info, 0, sizeof(info)); 4798 info.name = ptr_to_u64(name); 4799 info.name_len = sizeof(name); 4800 4801 /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer; 4802 * kernel's module BTF support coincides with support for 4803 * name/name_len fields in struct bpf_btf_info. 4804 */ 4805 err = bpf_btf_get_info_by_fd(fd, &info, &len); 4806 close(fd); 4807 return !err; 4808 } 4809 4810 static int probe_perf_link(void) 4811 { 4812 struct bpf_insn insns[] = { 4813 BPF_MOV64_IMM(BPF_REG_0, 0), 4814 BPF_EXIT_INSN(), 4815 }; 4816 int prog_fd, link_fd, err; 4817 4818 prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", 4819 insns, ARRAY_SIZE(insns), NULL); 4820 if (prog_fd < 0) 4821 return -errno; 4822 4823 /* use invalid perf_event FD to get EBADF, if link is supported; 4824 * otherwise EINVAL should be returned 4825 */ 4826 link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL); 4827 err = -errno; /* close() can clobber errno */ 4828 4829 if (link_fd >= 0) 4830 close(link_fd); 4831 close(prog_fd); 4832 4833 return link_fd < 0 && err == -EBADF; 4834 } 4835 4836 static int probe_uprobe_multi_link(void) 4837 { 4838 LIBBPF_OPTS(bpf_prog_load_opts, load_opts, 4839 .expected_attach_type = BPF_TRACE_UPROBE_MULTI, 4840 ); 4841 LIBBPF_OPTS(bpf_link_create_opts, link_opts); 4842 struct bpf_insn insns[] = { 4843 BPF_MOV64_IMM(BPF_REG_0, 0), 4844 BPF_EXIT_INSN(), 4845 }; 4846 int prog_fd, link_fd, err; 4847 unsigned long offset = 0; 4848 4849 prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", 4850 insns, ARRAY_SIZE(insns), &load_opts); 4851 if (prog_fd < 0) 4852 return -errno; 4853 4854 /* Creating uprobe in '/' binary should fail with -EBADF. 
*/ 4855 link_opts.uprobe_multi.path = "/"; 4856 link_opts.uprobe_multi.offsets = &offset; 4857 link_opts.uprobe_multi.cnt = 1; 4858 4859 link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts); 4860 err = -errno; /* close() can clobber errno */ 4861 4862 if (link_fd >= 0) 4863 close(link_fd); 4864 close(prog_fd); 4865 4866 return link_fd < 0 && err == -EBADF; 4867 } 4868 4869 static int probe_kern_bpf_cookie(void) 4870 { 4871 struct bpf_insn insns[] = { 4872 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie), 4873 BPF_EXIT_INSN(), 4874 }; 4875 int ret, insn_cnt = ARRAY_SIZE(insns); 4876 4877 ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL); 4878 return probe_fd(ret); 4879 } 4880 4881 static int probe_kern_btf_enum64(void) 4882 { 4883 static const char strs[] = "\0enum64"; 4884 __u32 types[] = { 4885 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8), 4886 }; 4887 4888 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 4889 strs, sizeof(strs))); 4890 } 4891 4892 static int probe_kern_syscall_wrapper(void); 4893 4894 enum kern_feature_result { 4895 FEAT_UNKNOWN = 0, 4896 FEAT_SUPPORTED = 1, 4897 FEAT_MISSING = 2, 4898 }; 4899 4900 typedef int (*feature_probe_fn)(void); 4901 4902 static struct kern_feature_desc { 4903 const char *desc; 4904 feature_probe_fn probe; 4905 enum kern_feature_result res; 4906 } feature_probes[__FEAT_CNT] = { 4907 [FEAT_PROG_NAME] = { 4908 "BPF program name", probe_kern_prog_name, 4909 }, 4910 [FEAT_GLOBAL_DATA] = { 4911 "global variables", probe_kern_global_data, 4912 }, 4913 [FEAT_BTF] = { 4914 "minimal BTF", probe_kern_btf, 4915 }, 4916 [FEAT_BTF_FUNC] = { 4917 "BTF functions", probe_kern_btf_func, 4918 }, 4919 [FEAT_BTF_GLOBAL_FUNC] = { 4920 "BTF global function", probe_kern_btf_func_global, 4921 }, 4922 [FEAT_BTF_DATASEC] = { 4923 "BTF data section and variable", probe_kern_btf_datasec, 4924 }, 4925 [FEAT_ARRAY_MMAP] = { 4926 "ARRAY map mmap()", probe_kern_array_mmap, 4927 }, 4928 [FEAT_EXP_ATTACH_TYPE] = { 4929 "BPF_PROG_LOAD expected_attach_type attribute", 4930 probe_kern_exp_attach_type, 4931 }, 4932 [FEAT_PROBE_READ_KERN] = { 4933 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, 4934 }, 4935 [FEAT_PROG_BIND_MAP] = { 4936 "BPF_PROG_BIND_MAP support", probe_prog_bind_map, 4937 }, 4938 [FEAT_MODULE_BTF] = { 4939 "module BTF support", probe_module_btf, 4940 }, 4941 [FEAT_BTF_FLOAT] = { 4942 "BTF_KIND_FLOAT support", probe_kern_btf_float, 4943 }, 4944 [FEAT_PERF_LINK] = { 4945 "BPF perf link support", probe_perf_link, 4946 }, 4947 [FEAT_BTF_DECL_TAG] = { 4948 "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag, 4949 }, 4950 [FEAT_BTF_TYPE_TAG] = { 4951 "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag, 4952 }, 4953 [FEAT_MEMCG_ACCOUNT] = { 4954 "memcg-based memory accounting", probe_memcg_account, 4955 }, 4956 [FEAT_BPF_COOKIE] = { 4957 "BPF cookie support", probe_kern_bpf_cookie, 4958 }, 4959 [FEAT_BTF_ENUM64] = { 4960 "BTF_KIND_ENUM64 support", probe_kern_btf_enum64, 4961 }, 4962 [FEAT_SYSCALL_WRAPPER] = { 4963 "Kernel using syscall wrapper", probe_kern_syscall_wrapper, 4964 }, 4965 [FEAT_UPROBE_MULTI_LINK] = { 4966 "BPF multi-uprobe link support", probe_uprobe_multi_link, 4967 }, 4968 }; 4969 4970 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) 4971 { 4972 struct kern_feature_desc *feat = &feature_probes[feat_id]; 4973 int ret; 4974 4975 if (obj && obj->gen_loader) 4976 /* To generate loader program assume the latest 
kernel 4977 * to avoid doing extra prog_load, map_create syscalls. 4978 */ 4979 return true; 4980 4981 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { 4982 ret = feat->probe(); 4983 if (ret > 0) { 4984 WRITE_ONCE(feat->res, FEAT_SUPPORTED); 4985 } else if (ret == 0) { 4986 WRITE_ONCE(feat->res, FEAT_MISSING); 4987 } else { 4988 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); 4989 WRITE_ONCE(feat->res, FEAT_MISSING); 4990 } 4991 } 4992 4993 return READ_ONCE(feat->res) == FEAT_SUPPORTED; 4994 } 4995 4996 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) 4997 { 4998 struct bpf_map_info map_info; 4999 char msg[STRERR_BUFSIZE]; 5000 __u32 map_info_len = sizeof(map_info); 5001 int err; 5002 5003 memset(&map_info, 0, map_info_len); 5004 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); 5005 if (err && errno == EINVAL) 5006 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); 5007 if (err) { 5008 pr_warn("failed to get map info for map FD %d: %s\n", map_fd, 5009 libbpf_strerror_r(errno, msg, sizeof(msg))); 5010 return false; 5011 } 5012 5013 return (map_info.type == map->def.type && 5014 map_info.key_size == map->def.key_size && 5015 map_info.value_size == map->def.value_size && 5016 map_info.max_entries == map->def.max_entries && 5017 map_info.map_flags == map->def.map_flags && 5018 map_info.map_extra == map->map_extra); 5019 } 5020 5021 static int 5022 bpf_object__reuse_map(struct bpf_map *map) 5023 { 5024 char *cp, errmsg[STRERR_BUFSIZE]; 5025 int err, pin_fd; 5026 5027 pin_fd = bpf_obj_get(map->pin_path); 5028 if (pin_fd < 0) { 5029 err = -errno; 5030 if (err == -ENOENT) { 5031 pr_debug("found no pinned map to reuse at '%s'\n", 5032 map->pin_path); 5033 return 0; 5034 } 5035 5036 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 5037 pr_warn("couldn't retrieve pinned map '%s': %s\n", 5038 map->pin_path, cp); 5039 return err; 5040 } 5041 5042 if (!map_is_reuse_compat(map, pin_fd)) { 5043 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", 5044 map->pin_path); 5045 close(pin_fd); 5046 return -EINVAL; 5047 } 5048 5049 err = bpf_map__reuse_fd(map, pin_fd); 5050 close(pin_fd); 5051 if (err) 5052 return err; 5053 5054 map->pinned = true; 5055 pr_debug("reused pinned map at '%s'\n", map->pin_path); 5056 5057 return 0; 5058 } 5059 5060 static int 5061 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) 5062 { 5063 enum libbpf_map_type map_type = map->libbpf_type; 5064 char *cp, errmsg[STRERR_BUFSIZE]; 5065 int err, zero = 0; 5066 5067 if (obj->gen_loader) { 5068 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, 5069 map->mmaped, map->def.value_size); 5070 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) 5071 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); 5072 return 0; 5073 } 5074 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); 5075 if (err) { 5076 err = -errno; 5077 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 5078 pr_warn("Error setting initial map(%s) contents: %s\n", 5079 map->name, cp); 5080 return err; 5081 } 5082 5083 /* Freeze .rodata and .kconfig map as read-only from syscall side. 
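	 * This rejects subsequent map update syscalls and, for maps that
	 * are also read-only on the BPF side, lets the verifier treat
	 * their contents as constants.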
*/ 5084 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { 5085 err = bpf_map_freeze(map->fd); 5086 if (err) { 5087 err = -errno; 5088 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 5089 pr_warn("Error freezing map(%s) as read-only: %s\n", 5090 map->name, cp); 5091 return err; 5092 } 5093 } 5094 return 0; 5095 } 5096 5097 static void bpf_map__destroy(struct bpf_map *map); 5098 5099 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) 5100 { 5101 LIBBPF_OPTS(bpf_map_create_opts, create_attr); 5102 struct bpf_map_def *def = &map->def; 5103 const char *map_name = NULL; 5104 int err = 0; 5105 5106 if (kernel_supports(obj, FEAT_PROG_NAME)) 5107 map_name = map->name; 5108 create_attr.map_ifindex = map->map_ifindex; 5109 create_attr.map_flags = def->map_flags; 5110 create_attr.numa_node = map->numa_node; 5111 create_attr.map_extra = map->map_extra; 5112 5113 if (bpf_map__is_struct_ops(map)) 5114 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 5115 5116 if (obj->btf && btf__fd(obj->btf) >= 0) { 5117 create_attr.btf_fd = btf__fd(obj->btf); 5118 create_attr.btf_key_type_id = map->btf_key_type_id; 5119 create_attr.btf_value_type_id = map->btf_value_type_id; 5120 } 5121 5122 if (bpf_map_type__is_map_in_map(def->type)) { 5123 if (map->inner_map) { 5124 err = bpf_object__create_map(obj, map->inner_map, true); 5125 if (err) { 5126 pr_warn("map '%s': failed to create inner map: %d\n", 5127 map->name, err); 5128 return err; 5129 } 5130 map->inner_map_fd = bpf_map__fd(map->inner_map); 5131 } 5132 if (map->inner_map_fd >= 0) 5133 create_attr.inner_map_fd = map->inner_map_fd; 5134 } 5135 5136 switch (def->type) { 5137 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 5138 case BPF_MAP_TYPE_CGROUP_ARRAY: 5139 case BPF_MAP_TYPE_STACK_TRACE: 5140 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 5141 case BPF_MAP_TYPE_HASH_OF_MAPS: 5142 case BPF_MAP_TYPE_DEVMAP: 5143 case BPF_MAP_TYPE_DEVMAP_HASH: 5144 case BPF_MAP_TYPE_CPUMAP: 5145 case BPF_MAP_TYPE_XSKMAP: 5146 case BPF_MAP_TYPE_SOCKMAP: 5147 case BPF_MAP_TYPE_SOCKHASH: 5148 case BPF_MAP_TYPE_QUEUE: 5149 case BPF_MAP_TYPE_STACK: 5150 create_attr.btf_fd = 0; 5151 create_attr.btf_key_type_id = 0; 5152 create_attr.btf_value_type_id = 0; 5153 map->btf_key_type_id = 0; 5154 map->btf_value_type_id = 0; 5155 default: 5156 break; 5157 } 5158 5159 if (obj->gen_loader) { 5160 bpf_gen__map_create(obj->gen_loader, def->type, map_name, 5161 def->key_size, def->value_size, def->max_entries, 5162 &create_attr, is_inner ? -1 : map - obj->maps); 5163 /* Pretend to have valid FD to pass various fd >= 0 checks. 5164 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. 5165 */ 5166 map->fd = 0; 5167 } else { 5168 map->fd = bpf_map_create(def->type, map_name, 5169 def->key_size, def->value_size, 5170 def->max_entries, &create_attr); 5171 } 5172 if (map->fd < 0 && (create_attr.btf_key_type_id || 5173 create_attr.btf_value_type_id)) { 5174 char *cp, errmsg[STRERR_BUFSIZE]; 5175 5176 err = -errno; 5177 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 5178 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", 5179 map->name, cp, err); 5180 create_attr.btf_fd = 0; 5181 create_attr.btf_key_type_id = 0; 5182 create_attr.btf_value_type_id = 0; 5183 map->btf_key_type_id = 0; 5184 map->btf_value_type_id = 0; 5185 map->fd = bpf_map_create(def->type, map_name, 5186 def->key_size, def->value_size, 5187 def->max_entries, &create_attr); 5188 } 5189 5190 err = map->fd < 0 ? 
-errno : 0; 5191 5192 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { 5193 if (obj->gen_loader) 5194 map->inner_map->fd = -1; 5195 bpf_map__destroy(map->inner_map); 5196 zfree(&map->inner_map); 5197 } 5198 5199 return err; 5200 } 5201 5202 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) 5203 { 5204 const struct bpf_map *targ_map; 5205 unsigned int i; 5206 int fd, err = 0; 5207 5208 for (i = 0; i < map->init_slots_sz; i++) { 5209 if (!map->init_slots[i]) 5210 continue; 5211 5212 targ_map = map->init_slots[i]; 5213 fd = bpf_map__fd(targ_map); 5214 5215 if (obj->gen_loader) { 5216 bpf_gen__populate_outer_map(obj->gen_loader, 5217 map - obj->maps, i, 5218 targ_map - obj->maps); 5219 } else { 5220 err = bpf_map_update_elem(map->fd, &i, &fd, 0); 5221 } 5222 if (err) { 5223 err = -errno; 5224 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", 5225 map->name, i, targ_map->name, fd, err); 5226 return err; 5227 } 5228 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", 5229 map->name, i, targ_map->name, fd); 5230 } 5231 5232 zfree(&map->init_slots); 5233 map->init_slots_sz = 0; 5234 5235 return 0; 5236 } 5237 5238 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) 5239 { 5240 const struct bpf_program *targ_prog; 5241 unsigned int i; 5242 int fd, err; 5243 5244 if (obj->gen_loader) 5245 return -ENOTSUP; 5246 5247 for (i = 0; i < map->init_slots_sz; i++) { 5248 if (!map->init_slots[i]) 5249 continue; 5250 5251 targ_prog = map->init_slots[i]; 5252 fd = bpf_program__fd(targ_prog); 5253 5254 err = bpf_map_update_elem(map->fd, &i, &fd, 0); 5255 if (err) { 5256 err = -errno; 5257 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", 5258 map->name, i, targ_prog->name, fd, err); 5259 return err; 5260 } 5261 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", 5262 map->name, i, targ_prog->name, fd); 5263 } 5264 5265 zfree(&map->init_slots); 5266 map->init_slots_sz = 0; 5267 5268 return 0; 5269 } 5270 5271 static int bpf_object_init_prog_arrays(struct bpf_object *obj) 5272 { 5273 struct bpf_map *map; 5274 int i, err; 5275 5276 for (i = 0; i < obj->nr_maps; i++) { 5277 map = &obj->maps[i]; 5278 5279 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) 5280 continue; 5281 5282 err = init_prog_array_slots(obj, map); 5283 if (err < 0) { 5284 zclose(map->fd); 5285 return err; 5286 } 5287 } 5288 return 0; 5289 } 5290 5291 static int map_set_def_max_entries(struct bpf_map *map) 5292 { 5293 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { 5294 int nr_cpus; 5295 5296 nr_cpus = libbpf_num_possible_cpus(); 5297 if (nr_cpus < 0) { 5298 pr_warn("map '%s': failed to determine number of system CPUs: %d\n", 5299 map->name, nr_cpus); 5300 return nr_cpus; 5301 } 5302 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); 5303 map->def.max_entries = nr_cpus; 5304 } 5305 5306 return 0; 5307 } 5308 5309 static int 5310 bpf_object__create_maps(struct bpf_object *obj) 5311 { 5312 struct bpf_map *map; 5313 char *cp, errmsg[STRERR_BUFSIZE]; 5314 unsigned int i, j; 5315 int err; 5316 bool retried; 5317 5318 for (i = 0; i < obj->nr_maps; i++) { 5319 map = &obj->maps[i]; 5320 5321 /* To support old kernels, we skip creating global data maps 5322 * (.rodata, .data, .kconfig, etc); later on, during program 5323 * loading, if we detect that at least one of the to-be-loaded 5324 * programs is referencing any global data map, we'll error 5325 * out with 
program name and relocation index logged.
		 * This approach lets us accommodate Clang emitting
		 * unnecessary .rodata.str1.1 sections for string literals,
		 * and also allows CO-RE applications to use global variables
		 * in some of their BPF programs, but not others.
		 * If those global variable-using programs are not loaded at
		 * runtime due to bpf_program__set_autoload(prog, false),
		 * bpf_object loading will succeed just fine even on old
		 * kernels.
		 */
		if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
			map->autocreate = false;

		if (!map->autocreate) {
			pr_debug("map '%s': skipped auto-creating...\n", map->name);
			continue;
		}

		err = map_set_def_max_entries(map);
		if (err)
			goto err_out;

		retried = false;
retry:
		if (map->pin_path) {
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("map '%s': error reusing pinned map\n",
					map->name);
				goto err_out;
			}
			if (retried && map->fd < 0) {
				pr_warn("map '%s': cannot find pinned map\n",
					map->name);
				err = -ENOENT;
				goto err_out;
			}
		}

		if (map->fd >= 0) {
			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
				 map->name, map->fd);
		} else {
			err = bpf_object__create_map(obj, map, false);
			if (err)
				goto err_out;

			pr_debug("map '%s': created successfully, fd=%d\n",
				 map->name, map->fd);

			if (bpf_map__is_internal(map)) {
				err = bpf_object__populate_internal_map(obj, map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}

			if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
				err = init_map_in_map_slots(obj, map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				zclose(map->fd);
				if (!retried && err == -EEXIST) {
					retried = true;
					goto retry;
				}
				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
					map->name, map->pin_path, err);
				goto err_out;
			}
		}
	}

	return 0;

err_out:
	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
	pr_perm_msg(err);
	for (j = 0; j < i; j++)
		zclose(obj->maps[j].fd);
	return err;
}

static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before the last triple underscore. The struct name part after the last
 * triple underscore is ignored by BPF CO-RE relocation during relocation
 * matching.
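 * E.g., for "task_struct___2" this returns 11, the length of "task_struct".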
5429 */ 5430 size_t bpf_core_essential_name_len(const char *name) 5431 { 5432 size_t n = strlen(name); 5433 int i; 5434 5435 for (i = n - 5; i >= 0; i--) { 5436 if (bpf_core_is_flavor_sep(name + i)) 5437 return i + 1; 5438 } 5439 return n; 5440 } 5441 5442 void bpf_core_free_cands(struct bpf_core_cand_list *cands) 5443 { 5444 if (!cands) 5445 return; 5446 5447 free(cands->cands); 5448 free(cands); 5449 } 5450 5451 int bpf_core_add_cands(struct bpf_core_cand *local_cand, 5452 size_t local_essent_len, 5453 const struct btf *targ_btf, 5454 const char *targ_btf_name, 5455 int targ_start_id, 5456 struct bpf_core_cand_list *cands) 5457 { 5458 struct bpf_core_cand *new_cands, *cand; 5459 const struct btf_type *t, *local_t; 5460 const char *targ_name, *local_name; 5461 size_t targ_essent_len; 5462 int n, i; 5463 5464 local_t = btf__type_by_id(local_cand->btf, local_cand->id); 5465 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); 5466 5467 n = btf__type_cnt(targ_btf); 5468 for (i = targ_start_id; i < n; i++) { 5469 t = btf__type_by_id(targ_btf, i); 5470 if (!btf_kind_core_compat(t, local_t)) 5471 continue; 5472 5473 targ_name = btf__name_by_offset(targ_btf, t->name_off); 5474 if (str_is_empty(targ_name)) 5475 continue; 5476 5477 targ_essent_len = bpf_core_essential_name_len(targ_name); 5478 if (targ_essent_len != local_essent_len) 5479 continue; 5480 5481 if (strncmp(local_name, targ_name, local_essent_len) != 0) 5482 continue; 5483 5484 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", 5485 local_cand->id, btf_kind_str(local_t), 5486 local_name, i, btf_kind_str(t), targ_name, 5487 targ_btf_name); 5488 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, 5489 sizeof(*cands->cands)); 5490 if (!new_cands) 5491 return -ENOMEM; 5492 5493 cand = &new_cands[cands->len]; 5494 cand->btf = targ_btf; 5495 cand->id = i; 5496 5497 cands->cands = new_cands; 5498 cands->len++; 5499 } 5500 return 0; 5501 } 5502 5503 static int load_module_btfs(struct bpf_object *obj) 5504 { 5505 struct bpf_btf_info info; 5506 struct module_btf *mod_btf; 5507 struct btf *btf; 5508 char name[64]; 5509 __u32 id = 0, len; 5510 int err, fd; 5511 5512 if (obj->btf_modules_loaded) 5513 return 0; 5514 5515 if (obj->gen_loader) 5516 return 0; 5517 5518 /* don't do this again, even if we find no module BTFs */ 5519 obj->btf_modules_loaded = true; 5520 5521 /* kernel too old to support module BTFs */ 5522 if (!kernel_supports(obj, FEAT_MODULE_BTF)) 5523 return 0; 5524 5525 while (true) { 5526 err = bpf_btf_get_next_id(id, &id); 5527 if (err && errno == ENOENT) 5528 return 0; 5529 if (err && errno == EPERM) { 5530 pr_debug("skipping module BTFs loading, missing privileges\n"); 5531 return 0; 5532 } 5533 if (err) { 5534 err = -errno; 5535 pr_warn("failed to iterate BTF objects: %d\n", err); 5536 return err; 5537 } 5538 5539 fd = bpf_btf_get_fd_by_id(id); 5540 if (fd < 0) { 5541 if (errno == ENOENT) 5542 continue; /* expected race: BTF was unloaded */ 5543 err = -errno; 5544 pr_warn("failed to get BTF object #%d FD: %d\n", id, err); 5545 return err; 5546 } 5547 5548 len = sizeof(info); 5549 memset(&info, 0, sizeof(info)); 5550 info.name = ptr_to_u64(name); 5551 info.name_len = sizeof(name); 5552 5553 err = bpf_btf_get_info_by_fd(fd, &info, &len); 5554 if (err) { 5555 err = -errno; 5556 pr_warn("failed to get BTF object #%d info: %d\n", id, err); 5557 goto err_out; 5558 } 5559 5560 /* ignore non-module BTFs */ 5561 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { 5562 
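			/* vmlinux BTF is loaded separately, as obj->btf_vmlinux */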
			close(fd);
			continue;
		}

		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
		err = libbpf_get_error(btf);
		if (err) {
			pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
				name, id, err);
			goto err_out;
		}

		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
					sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
		if (err)
			goto err_out;

		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];

		mod_btf->btf = btf;
		mod_btf->id = id;
		mod_btf->fd = fd;
		mod_btf->name = strdup(name);
		if (!mod_btf->name) {
			err = -ENOMEM;
			goto err_out;
		}
		continue;

err_out:
		close(fd);
		return err;
	}

	return 0;
}

static struct bpf_core_cand_list *
bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
{
	struct bpf_core_cand local_cand = {};
	struct bpf_core_cand_list *cands;
	const struct btf *main_btf;
	const struct btf_type *local_t;
	const char *local_name;
	size_t local_essent_len;
	int err, i;

	local_cand.btf = local_btf;
	local_cand.id = local_type_id;
	local_t = btf__type_by_id(local_btf, local_type_id);
	if (!local_t)
		return ERR_PTR(-EINVAL);

	local_name = btf__name_by_offset(local_btf, local_t->name_off);
	if (str_is_empty(local_name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return ERR_PTR(-ENOMEM);

	/* Attempt to find target candidates in vmlinux BTF first */
	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cands->len)
		return cands;

	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
	if (obj->btf_vmlinux_override)
		return cands;

	/* now look through module BTFs, trying to still find candidates */
	err = load_module_btfs(obj);
	if (err)
		goto err_out;

	for (i = 0; i < obj->btf_module_cnt; i++) {
		err = bpf_core_add_cands(&local_cand, local_essent_len,
					 obj->btf_modules[i].btf,
					 obj->btf_modules[i].name,
					 btf__type_cnt(obj->btf_vmlinux),
					 cands);
		if (err)
			goto err_out;
	}

	return cands;
err_out:
	bpf_core_free_cands(cands);
	return ERR_PTR(err);
}

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored.
Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signatures: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
}

int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
			 const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
}

static size_t bpf_core_hash_fn(const long key, void *ctx)
{
	return key;
}

static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
{
	return k1 == k2;
}

static int record_relo_core(struct bpf_program *prog,
			    const struct bpf_core_relo *core_relo, int insn_idx)
{
	struct reloc_desc *relos, *relo;

	relos = libbpf_reallocarray(prog->reloc_desc,
				    prog->nr_reloc + 1, sizeof(*relos));
	if (!relos)
		return -ENOMEM;
	relo = &relos[prog->nr_reloc];
	relo->type = RELO_CORE;
	relo->insn_idx = insn_idx;
	relo->core_relo = core_relo;
	prog->reloc_desc = relos;
	prog->nr_reloc++;
	return 0;
}

static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
{
	struct reloc_desc *relo;
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		relo = &prog->reloc_desc[i];
		if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
			continue;

		return relo->core_relo;
	}

	return NULL;
}

static int bpf_core_resolve_relo(struct bpf_program *prog,
				 const struct bpf_core_relo *relo,
				 int relo_idx,
				 const struct btf *local_btf,
				 struct hashmap *cand_cache,
				 struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_spec specs_scratch[3] = {};
	struct bpf_core_cand_list *cands = NULL;
	const char *prog_name = prog->name;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id = relo->type_id;
	int err;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type)
		return -EINVAL;

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
	    !hashmap__find(cand_cache, local_id, &cands)) {
		cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
		if (IS_ERR(cands)) {
			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
				prog_name, relo_idx, local_id, btf_kind_str(local_type),
				local_name, PTR_ERR(cands));
			return PTR_ERR(cands);
		}
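		/* cache candidates so that subsequent relos against the
		 * same local type ID can reuse them
		 */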
5767 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL); 5768 if (err) { 5769 bpf_core_free_cands(cands); 5770 return err; 5771 } 5772 } 5773 5774 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, 5775 targ_res); 5776 } 5777 5778 static int 5779 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) 5780 { 5781 const struct btf_ext_info_sec *sec; 5782 struct bpf_core_relo_res targ_res; 5783 const struct bpf_core_relo *rec; 5784 const struct btf_ext_info *seg; 5785 struct hashmap_entry *entry; 5786 struct hashmap *cand_cache = NULL; 5787 struct bpf_program *prog; 5788 struct bpf_insn *insn; 5789 const char *sec_name; 5790 int i, err = 0, insn_idx, sec_idx, sec_num; 5791 5792 if (obj->btf_ext->core_relo_info.len == 0) 5793 return 0; 5794 5795 if (targ_btf_path) { 5796 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); 5797 err = libbpf_get_error(obj->btf_vmlinux_override); 5798 if (err) { 5799 pr_warn("failed to parse target BTF: %d\n", err); 5800 return err; 5801 } 5802 } 5803 5804 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); 5805 if (IS_ERR(cand_cache)) { 5806 err = PTR_ERR(cand_cache); 5807 goto out; 5808 } 5809 5810 seg = &obj->btf_ext->core_relo_info; 5811 sec_num = 0; 5812 for_each_btf_ext_sec(seg, sec) { 5813 sec_idx = seg->sec_idxs[sec_num]; 5814 sec_num++; 5815 5816 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); 5817 if (str_is_empty(sec_name)) { 5818 err = -EINVAL; 5819 goto out; 5820 } 5821 5822 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info); 5823 5824 for_each_btf_ext_rec(seg, sec, i, rec) { 5825 if (rec->insn_off % BPF_INSN_SZ) 5826 return -EINVAL; 5827 insn_idx = rec->insn_off / BPF_INSN_SZ; 5828 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); 5829 if (!prog) { 5830 /* When __weak subprog is "overridden" by another instance 5831 * of the subprog from a different object file, linker still 5832 * appends all the .BTF.ext info that used to belong to that 5833 * eliminated subprogram. 5834 * This is similar to what x86-64 linker does for relocations. 5835 * So just ignore such relocations just like we ignore 5836 * subprog instructions when discovering subprograms. 
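				 * E.g., if a.o and b.o both define the same __weak
				 * subprog and the linker keeps b.o's copy, a.o's
				 * .BTF.ext CO-RE records still reference section
				 * offsets of the dropped copy, which no longer map
				 * to any discovered program.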
5837 */ 5838 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", 5839 sec_name, i, insn_idx); 5840 continue; 5841 } 5842 /* no need to apply CO-RE relocation if the program is 5843 * not going to be loaded 5844 */ 5845 if (!prog->autoload) 5846 continue; 5847 5848 /* adjust insn_idx from section frame of reference to the local 5849 * program's frame of reference; (sub-)program code is not yet 5850 * relocated, so it's enough to just subtract in-section offset 5851 */ 5852 insn_idx = insn_idx - prog->sec_insn_off; 5853 if (insn_idx >= prog->insns_cnt) 5854 return -EINVAL; 5855 insn = &prog->insns[insn_idx]; 5856 5857 err = record_relo_core(prog, rec, insn_idx); 5858 if (err) { 5859 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n", 5860 prog->name, i, err); 5861 goto out; 5862 } 5863 5864 if (prog->obj->gen_loader) 5865 continue; 5866 5867 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); 5868 if (err) { 5869 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", 5870 prog->name, i, err); 5871 goto out; 5872 } 5873 5874 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); 5875 if (err) { 5876 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", 5877 prog->name, i, insn_idx, err); 5878 goto out; 5879 } 5880 } 5881 } 5882 5883 out: 5884 /* obj->btf_vmlinux and module BTFs are freed after object load */ 5885 btf__free(obj->btf_vmlinux_override); 5886 obj->btf_vmlinux_override = NULL; 5887 5888 if (!IS_ERR_OR_NULL(cand_cache)) { 5889 hashmap__for_each_entry(cand_cache, entry, i) { 5890 bpf_core_free_cands(entry->pvalue); 5891 } 5892 hashmap__free(cand_cache); 5893 } 5894 return err; 5895 } 5896 5897 /* base map load ldimm64 special constant, used also for log fixup logic */ 5898 #define POISON_LDIMM64_MAP_BASE 2001000000 5899 #define POISON_LDIMM64_MAP_PFX "200100" 5900 5901 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx, 5902 int insn_idx, struct bpf_insn *insn, 5903 int map_idx, const struct bpf_map *map) 5904 { 5905 int i; 5906 5907 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n", 5908 prog->name, relo_idx, insn_idx, map_idx, map->name); 5909 5910 /* we turn single ldimm64 into two identical invalid calls */ 5911 for (i = 0; i < 2; i++) { 5912 insn->code = BPF_JMP | BPF_CALL; 5913 insn->dst_reg = 0; 5914 insn->src_reg = 0; 5915 insn->off = 0; 5916 /* if this instruction is reachable (not a dead code), 5917 * verifier will complain with something like: 5918 * invalid func unknown#2001000123 5919 * where lower 123 is map index into obj->maps[] array 5920 */ 5921 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx; 5922 5923 insn++; 5924 } 5925 } 5926 5927 /* unresolved kfunc call special constant, used also for log fixup logic */ 5928 #define POISON_CALL_KFUNC_BASE 2002000000 5929 #define POISON_CALL_KFUNC_PFX "2002" 5930 5931 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx, 5932 int insn_idx, struct bpf_insn *insn, 5933 int ext_idx, const struct extern_desc *ext) 5934 { 5935 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n", 5936 prog->name, relo_idx, insn_idx, ext->name); 5937 5938 /* we turn kfunc call into invalid helper call with identifiable constant */ 5939 insn->code = BPF_JMP | BPF_CALL; 5940 insn->dst_reg = 0; 5941 insn->src_reg = 0; 5942 insn->off = 0; 5943 /* if this instruction is reachable (not a dead code), 5944 * verifier will complain with something like: 5945 
	 * invalid func unknown#2002000123
	 * where lower 123 is extern index into obj->externs[] array
	 */
	insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
}

/* Relocate data references within program code:
 * - map references;
 * - global variable references;
 * - extern references.
 */
static int
bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		struct reloc_desc *relo = &prog->reloc_desc[i];
		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
		const struct bpf_map *map;
		struct extern_desc *ext;

		switch (relo->type) {
		case RELO_LD64:
			map = &obj->maps[relo->map_idx];
			if (obj->gen_loader) {
				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
				insn[0].imm = relo->map_idx;
			} else if (map->autocreate) {
				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
				insn[0].imm = map->fd;
			} else {
				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
						   relo->map_idx, map);
			}
			break;
		case RELO_DATA:
			map = &obj->maps[relo->map_idx];
			insn[1].imm = insn[0].imm + relo->sym_off;
			if (obj->gen_loader) {
				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
				insn[0].imm = relo->map_idx;
			} else if (map->autocreate) {
				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
				insn[0].imm = map->fd;
			} else {
				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
						   relo->map_idx, map);
			}
			break;
		case RELO_EXTERN_LD64:
			ext = &obj->externs[relo->ext_idx];
			if (ext->type == EXT_KCFG) {
				if (obj->gen_loader) {
					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
					insn[0].imm = obj->kconfig_map_idx;
				} else {
					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
				}
				insn[1].imm = ext->kcfg.data_off;
			} else /* EXT_KSYM */ {
				if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
					insn[0].imm = ext->ksym.kernel_btf_id;
					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
				} else { /* typeless ksyms or unresolved typed ksyms */
					insn[0].imm = (__u32)ext->ksym.addr;
					insn[1].imm = ext->ksym.addr >> 32;
				}
			}
			break;
		case RELO_EXTERN_CALL:
			ext = &obj->externs[relo->ext_idx];
			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
			if (ext->is_set) {
				insn[0].imm = ext->ksym.kernel_btf_id;
				insn[0].off = ext->ksym.btf_fd_idx;
			} else { /* unresolved weak kfunc call */
				poison_kfunc_call(prog, i, relo->insn_idx, insn,
						  relo->ext_idx, ext);
			}
			break;
		case RELO_SUBPROG_ADDR:
			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
				pr_warn("prog '%s': relo #%d: bad insn\n",
					prog->name, i);
				return -EINVAL;
			}
			/* handled already */
			break;
		case RELO_CALL:
			/* handled already */
			break;
		case RELO_CORE:
			/* will be handled by bpf_program_record_relos() */
			break;
		default:
			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
				prog->name, i, relo->type);
			return -EINVAL;
		}
	}

	return 0;
}

static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
				    const struct bpf_program *prog,
				    const struct btf_ext_info *ext_info,
				    void **prog_info, __u32 *prog_rec_cnt,
				    __u32 *prog_rec_sz)
{
	void *copy_start = NULL, *copy_end = NULL;
	void *rec, *rec_end, *new_prog_info;
	const struct btf_ext_info_sec *sec;
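	/* note: both func and line info records start with a __u32 insn_off
	 * (in bytes, see the conversion below), which is the only field this
	 * function touches, so one implementation serves both record kinds
	 */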
	size_t old_sz, new_sz;
	int i, sec_num, sec_idx, off_adj;

	sec_num = 0;
	for_each_btf_ext_sec(ext_info, sec) {
		sec_idx = ext_info->sec_idxs[sec_num];
		sec_num++;
		if (prog->sec_idx != sec_idx)
			continue;

		for_each_btf_ext_rec(ext_info, sec, i, rec) {
			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;

			if (insn_off < prog->sec_insn_off)
				continue;
			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
				break;

			if (!copy_start)
				copy_start = rec;
			copy_end = rec + ext_info->rec_size;
		}

		if (!copy_start)
			return -ENOENT;

		/* append func/line info of a given (sub-)program to the main
		 * program func/line info
		 */
		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
		new_sz = old_sz + (copy_end - copy_start);
		new_prog_info = realloc(*prog_info, new_sz);
		if (!new_prog_info)
			return -ENOMEM;
		*prog_info = new_prog_info;
		*prog_rec_cnt = new_sz / ext_info->rec_size;
		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);

		/* Kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext instruction offsets generated
		 * by Clang are in units of bytes. So convert Clang offsets
		 * into kernel offsets and adjust offset according to program
		 * relocated position.
		 */
		off_adj = prog->sub_insn_off - prog->sec_insn_off;
		rec = new_prog_info + old_sz;
		rec_end = new_prog_info + new_sz;
		for (; rec < rec_end; rec += ext_info->rec_size) {
			__u32 *insn_off = rec;

			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
		}
		*prog_rec_sz = ext_info->rec_size;
		return 0;
	}

	return -ENOENT;
}

static int
reloc_prog_func_and_line_info(const struct bpf_object *obj,
			      struct bpf_program *main_prog,
			      const struct bpf_program *prog)
{
	int err;

	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */
	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
		return 0;

	/* only attempt func info relocation if main program's func_info
	 * relocation was successful
	 */
	if (main_prog != prog && !main_prog->func_info)
		goto line_info;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
				       &main_prog->func_info,
				       &main_prog->func_info_cnt,
				       &main_prog->func_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->func_info) {
			/*
			 * Some info was already found, but the last btf_ext
			 * relocation failed. We have to error out.
			 */
			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
			return err;
		}
		/* Failed to load the very first info. Ignore the rest. */
		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
			prog->name);
	}

line_info:
	/* don't relocate line info if main program's relocation failed */
	if (main_prog != prog && !main_prog->line_info)
		return 0;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
				       &main_prog->line_info,
				       &main_prog->line_info_cnt,
				       &main_prog->line_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->line_info) {
			/*
			 * Some info was already found, but the last btf_ext
			 * relocation failed. We have to error out.
			 */
			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
			return err;
		}
		/* Failed to load the very first info. Ignore the rest. */
		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
			prog->name);
	}
	return 0;
}

static int cmp_relo_by_insn_idx(const void *key, const void *elem)
{
	size_t insn_idx = *(const size_t *)key;
	const struct reloc_desc *relo = elem;

	if (insn_idx == relo->insn_idx)
		return 0;
	return insn_idx < relo->insn_idx ? -1 : 1;
}

static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
{
	if (!prog->nr_reloc)
		return NULL;
	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
}

static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
{
	int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
	struct reloc_desc *relos;
	int i;

	if (main_prog == subprog)
		return 0;
	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
	/* if new count is zero, reallocarray can return a valid NULL result;
	 * in this case the previous pointer will be freed, so we *have to*
	 * reassign old pointer to the new value (even if it's NULL)
	 */
	if (!relos && new_cnt)
		return -ENOMEM;
	if (subprog->nr_reloc)
		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
		       sizeof(*relos) * subprog->nr_reloc);

	for (i = main_prog->nr_reloc; i < new_cnt; i++)
		relos[i].insn_idx += subprog->sub_insn_off;
	/* After insn_idx adjustment the 'relos' array is still sorted
	 * by insn_idx and doesn't break bsearch.
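	 * That holds because every relo already in main_prog references an
	 * instruction below subprog->sub_insn_off (the append point), while
	 * all the copied relos get shifted to or past that point.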
6233 */ 6234 main_prog->reloc_desc = relos; 6235 main_prog->nr_reloc = new_cnt; 6236 return 0; 6237 } 6238 6239 static int 6240 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, 6241 struct bpf_program *prog) 6242 { 6243 size_t sub_insn_idx, insn_idx, new_cnt; 6244 struct bpf_program *subprog; 6245 struct bpf_insn *insns, *insn; 6246 struct reloc_desc *relo; 6247 int err; 6248 6249 err = reloc_prog_func_and_line_info(obj, main_prog, prog); 6250 if (err) 6251 return err; 6252 6253 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { 6254 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; 6255 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) 6256 continue; 6257 6258 relo = find_prog_insn_relo(prog, insn_idx); 6259 if (relo && relo->type == RELO_EXTERN_CALL) 6260 /* kfunc relocations will be handled later 6261 * in bpf_object__relocate_data() 6262 */ 6263 continue; 6264 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { 6265 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", 6266 prog->name, insn_idx, relo->type); 6267 return -LIBBPF_ERRNO__RELOC; 6268 } 6269 if (relo) { 6270 /* sub-program instruction index is a combination of 6271 * an offset of a symbol pointed to by relocation and 6272 * call instruction's imm field; for global functions, 6273 * call always has imm = -1, but for static functions 6274 * relocation is against STT_SECTION and insn->imm 6275 * points to a start of a static function 6276 * 6277 * for subprog addr relocation, the relo->sym_off + insn->imm is 6278 * the byte offset in the corresponding section. 6279 */ 6280 if (relo->type == RELO_CALL) 6281 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; 6282 else 6283 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; 6284 } else if (insn_is_pseudo_func(insn)) { 6285 /* 6286 * RELO_SUBPROG_ADDR relo is always emitted even if both 6287 * functions are in the same section, so it shouldn't reach here. 6288 */ 6289 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", 6290 prog->name, insn_idx); 6291 return -LIBBPF_ERRNO__RELOC; 6292 } else { 6293 /* if subprogram call is to a static function within 6294 * the same ELF section, there won't be any relocation 6295 * emitted, but it also means there is no additional 6296 * offset necessary, insns->imm is relative to 6297 * instruction's original position within the section 6298 */ 6299 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; 6300 } 6301 6302 /* we enforce that sub-programs should be in .text section */ 6303 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); 6304 if (!subprog) { 6305 pr_warn("prog '%s': no .text section found yet sub-program call exists\n", 6306 prog->name); 6307 return -LIBBPF_ERRNO__RELOC; 6308 } 6309 6310 /* if it's the first call instruction calling into this 6311 * subprogram (meaning this subprog hasn't been processed 6312 * yet) within the context of current main program: 6313 * - append it at the end of main program's instructions blog; 6314 * - process is recursively, while current program is put on hold; 6315 * - if that subprogram calls some other not yet processes 6316 * subprogram, same thing will happen recursively until 6317 * there are no more unprocesses subprograms left to append 6318 * and relocate. 
6319 */ 6320 if (subprog->sub_insn_off == 0) { 6321 subprog->sub_insn_off = main_prog->insns_cnt; 6322 6323 new_cnt = main_prog->insns_cnt + subprog->insns_cnt; 6324 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); 6325 if (!insns) { 6326 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); 6327 return -ENOMEM; 6328 } 6329 main_prog->insns = insns; 6330 main_prog->insns_cnt = new_cnt; 6331 6332 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, 6333 subprog->insns_cnt * sizeof(*insns)); 6334 6335 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", 6336 main_prog->name, subprog->insns_cnt, subprog->name); 6337 6338 /* The subprog insns are now appended. Append its relos too. */ 6339 err = append_subprog_relos(main_prog, subprog); 6340 if (err) 6341 return err; 6342 err = bpf_object__reloc_code(obj, main_prog, subprog); 6343 if (err) 6344 return err; 6345 } 6346 6347 /* main_prog->insns memory could have been re-allocated, so 6348 * calculate pointer again 6349 */ 6350 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; 6351 /* calculate correct instruction position within current main 6352 * prog; each main prog can have a different set of 6353 * subprograms appended (potentially in different order as 6354 * well), so position of any subprog can be different for 6355 * different main programs 6356 */ 6357 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; 6358 6359 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", 6360 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); 6361 } 6362 6363 return 0; 6364 } 6365 6366 /* 6367 * Relocate sub-program calls. 6368 * 6369 * Algorithm operates as follows. Each entry-point BPF program (referred to as 6370 * main prog) is processed separately. For each subprog (non-entry functions, 6371 * that can be called from either entry progs or other subprogs) gets their 6372 * sub_insn_off reset to zero. This serves as indicator that this subprogram 6373 * hasn't been yet appended and relocated within current main prog. Once its 6374 * relocated, sub_insn_off will point at the position within current main prog 6375 * where given subprog was appended. This will further be used to relocate all 6376 * the call instructions jumping into this subprog. 6377 * 6378 * We start with main program and process all call instructions. If the call 6379 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off 6380 * is zero), subprog instructions are appended at the end of main program's 6381 * instruction array. Then main program is "put on hold" while we recursively 6382 * process newly appended subprogram. If that subprogram calls into another 6383 * subprogram that hasn't been appended, new subprogram is appended again to 6384 * the *main* prog's instructions (subprog's instructions are always left 6385 * untouched, as they need to be in unmodified state for subsequent main progs 6386 * and subprog instructions are always sent only as part of a main prog) and 6387 * the process continues recursively. Once all the subprogs called from a main 6388 * prog or any of its subprogs are appended (and relocated), all their 6389 * positions within finalized instructions array are known, so it's easy to 6390 * rewrite call instructions with correct relative offsets, corresponding to 6391 * desired target subprog. 
6392 * 6393 * Its important to realize that some subprogs might not be called from some 6394 * main prog and any of its called/used subprogs. Those will keep their 6395 * subprog->sub_insn_off as zero at all times and won't be appended to current 6396 * main prog and won't be relocated within the context of current main prog. 6397 * They might still be used from other main progs later. 6398 * 6399 * Visually this process can be shown as below. Suppose we have two main 6400 * programs mainA and mainB and BPF object contains three subprogs: subA, 6401 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and 6402 * subC both call subB: 6403 * 6404 * +--------+ +-------+ 6405 * | v v | 6406 * +--+---+ +--+-+-+ +---+--+ 6407 * | subA | | subB | | subC | 6408 * +--+---+ +------+ +---+--+ 6409 * ^ ^ 6410 * | | 6411 * +---+-------+ +------+----+ 6412 * | mainA | | mainB | 6413 * +-----------+ +-----------+ 6414 * 6415 * We'll start relocating mainA, will find subA, append it and start 6416 * processing sub A recursively: 6417 * 6418 * +-----------+------+ 6419 * | mainA | subA | 6420 * +-----------+------+ 6421 * 6422 * At this point we notice that subB is used from subA, so we append it and 6423 * relocate (there are no further subcalls from subB): 6424 * 6425 * +-----------+------+------+ 6426 * | mainA | subA | subB | 6427 * +-----------+------+------+ 6428 * 6429 * At this point, we relocate subA calls, then go one level up and finish with 6430 * relocatin mainA calls. mainA is done. 6431 * 6432 * For mainB process is similar but results in different order. We start with 6433 * mainB and skip subA and subB, as mainB never calls them (at least 6434 * directly), but we see subC is needed, so we append and start processing it: 6435 * 6436 * +-----------+------+ 6437 * | mainB | subC | 6438 * +-----------+------+ 6439 * Now we see subC needs subB, so we go back to it, append and relocate it: 6440 * 6441 * +-----------+------+------+ 6442 * | mainB | subC | subB | 6443 * +-----------+------+------+ 6444 * 6445 * At this point we unwind recursion, relocate calls in subC, then in mainB. 6446 */ 6447 static int 6448 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) 6449 { 6450 struct bpf_program *subprog; 6451 int i, err; 6452 6453 /* mark all subprogs as not relocated (yet) within the context of 6454 * current main program 6455 */ 6456 for (i = 0; i < obj->nr_programs; i++) { 6457 subprog = &obj->programs[i]; 6458 if (!prog_is_subprog(obj, subprog)) 6459 continue; 6460 6461 subprog->sub_insn_off = 0; 6462 } 6463 6464 err = bpf_object__reloc_code(obj, prog, prog); 6465 if (err) 6466 return err; 6467 6468 return 0; 6469 } 6470 6471 static void 6472 bpf_object__free_relocs(struct bpf_object *obj) 6473 { 6474 struct bpf_program *prog; 6475 int i; 6476 6477 /* free up relocation descriptors */ 6478 for (i = 0; i < obj->nr_programs; i++) { 6479 prog = &obj->programs[i]; 6480 zfree(&prog->reloc_desc); 6481 prog->nr_reloc = 0; 6482 } 6483 } 6484 6485 static int cmp_relocs(const void *_a, const void *_b) 6486 { 6487 const struct reloc_desc *a = _a; 6488 const struct reloc_desc *b = _b; 6489 6490 if (a->insn_idx != b->insn_idx) 6491 return a->insn_idx < b->insn_idx ? -1 : 1; 6492 6493 /* no two relocations should have the same insn_idx, but ... */ 6494 if (a->type != b->type) 6495 return a->type < b->type ? 
-1 : 1; 6496 6497 return 0; 6498 } 6499 6500 static void bpf_object__sort_relos(struct bpf_object *obj) 6501 { 6502 int i; 6503 6504 for (i = 0; i < obj->nr_programs; i++) { 6505 struct bpf_program *p = &obj->programs[i]; 6506 6507 if (!p->nr_reloc) 6508 continue; 6509 6510 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); 6511 } 6512 } 6513 6514 static int 6515 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) 6516 { 6517 struct bpf_program *prog; 6518 size_t i, j; 6519 int err; 6520 6521 if (obj->btf_ext) { 6522 err = bpf_object__relocate_core(obj, targ_btf_path); 6523 if (err) { 6524 pr_warn("failed to perform CO-RE relocations: %d\n", 6525 err); 6526 return err; 6527 } 6528 bpf_object__sort_relos(obj); 6529 } 6530 6531 /* Before relocating calls pre-process relocations and mark 6532 * few ld_imm64 instructions that points to subprogs. 6533 * Otherwise bpf_object__reloc_code() later would have to consider 6534 * all ld_imm64 insns as relocation candidates. That would 6535 * reduce relocation speed, since amount of find_prog_insn_relo() 6536 * would increase and most of them will fail to find a relo. 6537 */ 6538 for (i = 0; i < obj->nr_programs; i++) { 6539 prog = &obj->programs[i]; 6540 for (j = 0; j < prog->nr_reloc; j++) { 6541 struct reloc_desc *relo = &prog->reloc_desc[j]; 6542 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; 6543 6544 /* mark the insn, so it's recognized by insn_is_pseudo_func() */ 6545 if (relo->type == RELO_SUBPROG_ADDR) 6546 insn[0].src_reg = BPF_PSEUDO_FUNC; 6547 } 6548 } 6549 6550 /* relocate subprogram calls and append used subprograms to main 6551 * programs; each copy of subprogram code needs to be relocated 6552 * differently for each main program, because its code location might 6553 * have changed. 6554 * Append subprog relos to main programs to allow data relos to be 6555 * processed after text is completely relocated. 
6556 */ 6557 for (i = 0; i < obj->nr_programs; i++) { 6558 prog = &obj->programs[i]; 6559 /* sub-program's sub-calls are relocated within the context of 6560 * its main program only 6561 */ 6562 if (prog_is_subprog(obj, prog)) 6563 continue; 6564 if (!prog->autoload) 6565 continue; 6566 6567 err = bpf_object__relocate_calls(obj, prog); 6568 if (err) { 6569 pr_warn("prog '%s': failed to relocate calls: %d\n", 6570 prog->name, err); 6571 return err; 6572 } 6573 } 6574 /* Process data relos for main programs */ 6575 for (i = 0; i < obj->nr_programs; i++) { 6576 prog = &obj->programs[i]; 6577 if (prog_is_subprog(obj, prog)) 6578 continue; 6579 if (!prog->autoload) 6580 continue; 6581 err = bpf_object__relocate_data(obj, prog); 6582 if (err) { 6583 pr_warn("prog '%s': failed to relocate data references: %d\n", 6584 prog->name, err); 6585 return err; 6586 } 6587 } 6588 6589 return 0; 6590 } 6591 6592 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, 6593 Elf64_Shdr *shdr, Elf_Data *data); 6594 6595 static int bpf_object__collect_map_relos(struct bpf_object *obj, 6596 Elf64_Shdr *shdr, Elf_Data *data) 6597 { 6598 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); 6599 int i, j, nrels, new_sz; 6600 const struct btf_var_secinfo *vi = NULL; 6601 const struct btf_type *sec, *var, *def; 6602 struct bpf_map *map = NULL, *targ_map = NULL; 6603 struct bpf_program *targ_prog = NULL; 6604 bool is_prog_array, is_map_in_map; 6605 const struct btf_member *member; 6606 const char *name, *mname, *type; 6607 unsigned int moff; 6608 Elf64_Sym *sym; 6609 Elf64_Rel *rel; 6610 void *tmp; 6611 6612 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) 6613 return -EINVAL; 6614 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); 6615 if (!sec) 6616 return -EINVAL; 6617 6618 nrels = shdr->sh_size / shdr->sh_entsize; 6619 for (i = 0; i < nrels; i++) { 6620 rel = elf_rel_by_idx(data, i); 6621 if (!rel) { 6622 pr_warn(".maps relo #%d: failed to get ELF relo\n", i); 6623 return -LIBBPF_ERRNO__FORMAT; 6624 } 6625 6626 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); 6627 if (!sym) { 6628 pr_warn(".maps relo #%d: symbol %zx not found\n", 6629 i, (size_t)ELF64_R_SYM(rel->r_info)); 6630 return -LIBBPF_ERRNO__FORMAT; 6631 } 6632 name = elf_sym_str(obj, sym->st_name) ?: "<?>"; 6633 6634 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", 6635 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, 6636 (size_t)rel->r_offset, sym->st_name, name); 6637 6638 for (j = 0; j < obj->nr_maps; j++) { 6639 map = &obj->maps[j]; 6640 if (map->sec_idx != obj->efile.btf_maps_shndx) 6641 continue; 6642 6643 vi = btf_var_secinfos(sec) + map->btf_var_idx; 6644 if (vi->offset <= rel->r_offset && 6645 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) 6646 break; 6647 } 6648 if (j == obj->nr_maps) { 6649 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", 6650 i, name, (size_t)rel->r_offset); 6651 return -EINVAL; 6652 } 6653 6654 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); 6655 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; 6656 type = is_map_in_map ? 
"map" : "prog"; 6657 if (is_map_in_map) { 6658 if (sym->st_shndx != obj->efile.btf_maps_shndx) { 6659 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", 6660 i, name); 6661 return -LIBBPF_ERRNO__RELOC; 6662 } 6663 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && 6664 map->def.key_size != sizeof(int)) { 6665 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", 6666 i, map->name, sizeof(int)); 6667 return -EINVAL; 6668 } 6669 targ_map = bpf_object__find_map_by_name(obj, name); 6670 if (!targ_map) { 6671 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", 6672 i, name); 6673 return -ESRCH; 6674 } 6675 } else if (is_prog_array) { 6676 targ_prog = bpf_object__find_program_by_name(obj, name); 6677 if (!targ_prog) { 6678 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", 6679 i, name); 6680 return -ESRCH; 6681 } 6682 if (targ_prog->sec_idx != sym->st_shndx || 6683 targ_prog->sec_insn_off * 8 != sym->st_value || 6684 prog_is_subprog(obj, targ_prog)) { 6685 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", 6686 i, name); 6687 return -LIBBPF_ERRNO__RELOC; 6688 } 6689 } else { 6690 return -EINVAL; 6691 } 6692 6693 var = btf__type_by_id(obj->btf, vi->type); 6694 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); 6695 if (btf_vlen(def) == 0) 6696 return -EINVAL; 6697 member = btf_members(def) + btf_vlen(def) - 1; 6698 mname = btf__name_by_offset(obj->btf, member->name_off); 6699 if (strcmp(mname, "values")) 6700 return -EINVAL; 6701 6702 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; 6703 if (rel->r_offset - vi->offset < moff) 6704 return -EINVAL; 6705 6706 moff = rel->r_offset - vi->offset - moff; 6707 /* here we use BPF pointer size, which is always 64 bit, as we 6708 * are parsing ELF that was built for BPF target 6709 */ 6710 if (moff % bpf_ptr_sz) 6711 return -EINVAL; 6712 moff /= bpf_ptr_sz; 6713 if (moff >= map->init_slots_sz) { 6714 new_sz = moff + 1; 6715 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); 6716 if (!tmp) 6717 return -ENOMEM; 6718 map->init_slots = tmp; 6719 memset(map->init_slots + map->init_slots_sz, 0, 6720 (new_sz - map->init_slots_sz) * host_ptr_sz); 6721 map->init_slots_sz = new_sz; 6722 } 6723 map->init_slots[moff] = is_map_in_map ? 
(void *)targ_map : (void *)targ_prog; 6724 6725 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", 6726 i, map->name, moff, type, name); 6727 } 6728 6729 return 0; 6730 } 6731 6732 static int bpf_object__collect_relos(struct bpf_object *obj) 6733 { 6734 int i, err; 6735 6736 for (i = 0; i < obj->efile.sec_cnt; i++) { 6737 struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; 6738 Elf64_Shdr *shdr; 6739 Elf_Data *data; 6740 int idx; 6741 6742 if (sec_desc->sec_type != SEC_RELO) 6743 continue; 6744 6745 shdr = sec_desc->shdr; 6746 data = sec_desc->data; 6747 idx = shdr->sh_info; 6748 6749 if (shdr->sh_type != SHT_REL) { 6750 pr_warn("internal error at %d\n", __LINE__); 6751 return -LIBBPF_ERRNO__INTERNAL; 6752 } 6753 6754 if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx) 6755 err = bpf_object__collect_st_ops_relos(obj, shdr, data); 6756 else if (idx == obj->efile.btf_maps_shndx) 6757 err = bpf_object__collect_map_relos(obj, shdr, data); 6758 else 6759 err = bpf_object__collect_prog_relos(obj, shdr, data); 6760 if (err) 6761 return err; 6762 } 6763 6764 bpf_object__sort_relos(obj); 6765 return 0; 6766 } 6767 6768 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) 6769 { 6770 if (BPF_CLASS(insn->code) == BPF_JMP && 6771 BPF_OP(insn->code) == BPF_CALL && 6772 BPF_SRC(insn->code) == BPF_K && 6773 insn->src_reg == 0 && 6774 insn->dst_reg == 0) { 6775 *func_id = insn->imm; 6776 return true; 6777 } 6778 return false; 6779 } 6780 6781 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) 6782 { 6783 struct bpf_insn *insn = prog->insns; 6784 enum bpf_func_id func_id; 6785 int i; 6786 6787 if (obj->gen_loader) 6788 return 0; 6789 6790 for (i = 0; i < prog->insns_cnt; i++, insn++) { 6791 if (!insn_is_helper_call(insn, &func_id)) 6792 continue; 6793 6794 /* on kernels that don't yet support 6795 * bpf_probe_read_{kernel,user}[_str] helpers, fall back 6796 * to bpf_probe_read() which works well for old kernels 6797 */ 6798 switch (func_id) { 6799 case BPF_FUNC_probe_read_kernel: 6800 case BPF_FUNC_probe_read_user: 6801 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) 6802 insn->imm = BPF_FUNC_probe_read; 6803 break; 6804 case BPF_FUNC_probe_read_kernel_str: 6805 case BPF_FUNC_probe_read_user_str: 6806 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) 6807 insn->imm = BPF_FUNC_probe_read_str; 6808 break; 6809 default: 6810 break; 6811 } 6812 } 6813 return 0; 6814 } 6815 6816 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, 6817 int *btf_obj_fd, int *btf_type_id); 6818 6819 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ 6820 static int libbpf_prepare_prog_load(struct bpf_program *prog, 6821 struct bpf_prog_load_opts *opts, long cookie) 6822 { 6823 enum sec_def_flags def = cookie; 6824 6825 /* old kernels might not support specifying expected_attach_type */ 6826 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) 6827 opts->expected_attach_type = 0; 6828 6829 if (def & SEC_SLEEPABLE) 6830 opts->prog_flags |= BPF_F_SLEEPABLE; 6831 6832 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) 6833 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; 6834 6835 /* special check for usdt to use uprobe_multi link */ 6836 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) 6837 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; 6838 6839 if ((def & SEC_ATTACH_BTF) && 
	    !prog->attach_btf_id) {
		int btf_obj_fd = 0, btf_type_id = 0, err;
		const char *attach_name;

		attach_name = strchr(prog->sec_name, '/');
		if (!attach_name) {
			/* if BPF program is annotated with just SEC("fentry")
			 * (or similar) without declaratively specifying
			 * target, then it is expected that target will be
			 * specified with bpf_program__set_attach_target() at
			 * runtime before BPF object load step. If not, then
			 * there is nothing to load into the kernel as BPF
			 * verifier won't be able to validate BPF program
			 * correctness anyways.
			 */
			pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
				prog->name);
			return -EINVAL;
		}
		attach_name++; /* skip over / */

		err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
		if (err)
			return err;

		/* cache resolved BTF FD and BTF type ID in the prog */
		prog->attach_btf_obj_fd = btf_obj_fd;
		prog->attach_btf_id = btf_type_id;

		/* but by now libbpf's common logic no longer uses
		 * prog->attach_btf_obj_fd/prog->attach_btf_id, because this
		 * callback is called after opts were populated by libbpf, so
		 * this callback has to update opts explicitly here
		 */
		opts->attach_btf_obj_fd = btf_obj_fd;
		opts->attach_btf_id = btf_type_id;
	}
	return 0;
}

static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);

static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
				struct bpf_insn *insns, int insns_cnt,
				const char *license, __u32 kern_version, int *prog_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
	const char *prog_name = NULL;
	char *cp, errmsg[STRERR_BUFSIZE];
	size_t log_buf_size = 0;
	char *log_buf = NULL, *tmp;
	int btf_fd, ret, err;
	bool own_log_buf = true;
	__u32 log_level = prog->log_level;

	if (prog->type == BPF_PROG_TYPE_UNSPEC) {
		/*
		 * The program type must be set. Most likely we couldn't find a proper
		 * section definition at load time, and thus we didn't infer the type.
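		 * E.g., a program placed in SEC("xdp") gets BPF_PROG_TYPE_XDP
		 * inferred from the section name, while an unrecognized
		 * section name leaves the type unset unless the user calls
		 * bpf_program__set_type() before load.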
6898 */ 6899 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", 6900 prog->name, prog->sec_name); 6901 return -EINVAL; 6902 } 6903 6904 if (!insns || !insns_cnt) 6905 return -EINVAL; 6906 6907 if (kernel_supports(obj, FEAT_PROG_NAME)) 6908 prog_name = prog->name; 6909 load_attr.attach_prog_fd = prog->attach_prog_fd; 6910 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; 6911 load_attr.attach_btf_id = prog->attach_btf_id; 6912 load_attr.kern_version = kern_version; 6913 load_attr.prog_ifindex = prog->prog_ifindex; 6914 6915 /* specify func_info/line_info only if kernel supports them */ 6916 btf_fd = bpf_object__btf_fd(obj); 6917 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { 6918 load_attr.prog_btf_fd = btf_fd; 6919 load_attr.func_info = prog->func_info; 6920 load_attr.func_info_rec_size = prog->func_info_rec_size; 6921 load_attr.func_info_cnt = prog->func_info_cnt; 6922 load_attr.line_info = prog->line_info; 6923 load_attr.line_info_rec_size = prog->line_info_rec_size; 6924 load_attr.line_info_cnt = prog->line_info_cnt; 6925 } 6926 load_attr.log_level = log_level; 6927 load_attr.prog_flags = prog->prog_flags; 6928 load_attr.fd_array = obj->fd_array; 6929 6930 /* adjust load_attr if sec_def provides custom preload callback */ 6931 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { 6932 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); 6933 if (err < 0) { 6934 pr_warn("prog '%s': failed to prepare load attributes: %d\n", 6935 prog->name, err); 6936 return err; 6937 } 6938 insns = prog->insns; 6939 insns_cnt = prog->insns_cnt; 6940 } 6941 6942 /* allow prog_prepare_load_fn to change expected_attach_type */ 6943 load_attr.expected_attach_type = prog->expected_attach_type; 6944 6945 if (obj->gen_loader) { 6946 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, 6947 license, insns, insns_cnt, &load_attr, 6948 prog - obj->programs); 6949 *prog_fd = -1; 6950 return 0; 6951 } 6952 6953 retry_load: 6954 /* if log_level is zero, we don't request logs initially even if 6955 * custom log_buf is specified; if the program load fails, then we'll 6956 * bump log_level to 1 and use either custom log_buf or we'll allocate 6957 * our own and retry the load to get details on what failed 6958 */ 6959 if (log_level) { 6960 if (prog->log_buf) { 6961 log_buf = prog->log_buf; 6962 log_buf_size = prog->log_size; 6963 own_log_buf = false; 6964 } else if (obj->log_buf) { 6965 log_buf = obj->log_buf; 6966 log_buf_size = obj->log_size; 6967 own_log_buf = false; 6968 } else { 6969 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); 6970 tmp = realloc(log_buf, log_buf_size); 6971 if (!tmp) { 6972 ret = -ENOMEM; 6973 goto out; 6974 } 6975 log_buf = tmp; 6976 log_buf[0] = '\0'; 6977 own_log_buf = true; 6978 } 6979 } 6980 6981 load_attr.log_buf = log_buf; 6982 load_attr.log_size = log_buf_size; 6983 load_attr.log_level = log_level; 6984 6985 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); 6986 if (ret >= 0) { 6987 if (log_level && own_log_buf) { 6988 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", 6989 prog->name, log_buf); 6990 } 6991 6992 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { 6993 struct bpf_map *map; 6994 int i; 6995 6996 for (i = 0; i < obj->nr_maps; i++) { 6997 map = &prog->obj->maps[i]; 6998 if (map->libbpf_type != LIBBPF_MAP_RODATA) 6999 continue; 7000 7001 if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) { 7002 cp = 
libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 7003 pr_warn("prog '%s': failed to bind map '%s': %s\n", 7004 prog->name, map->real_name, cp); 7005 /* Don't fail hard if can't bind rodata. */ 7006 } 7007 } 7008 } 7009 7010 *prog_fd = ret; 7011 ret = 0; 7012 goto out; 7013 } 7014 7015 if (log_level == 0) { 7016 log_level = 1; 7017 goto retry_load; 7018 } 7019 /* On ENOSPC, increase log buffer size and retry, unless custom 7020 * log_buf is specified. 7021 * Be careful to not overflow u32, though. Kernel's log buf size limit 7022 * isn't part of UAPI so it can always be bumped to full 4GB. So don't 7023 * multiply by 2 unless we are sure we'll fit within 32 bits. 7024 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). 7025 */ 7026 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) 7027 goto retry_load; 7028 7029 ret = -errno; 7030 7031 /* post-process verifier log to improve error descriptions */ 7032 fixup_verifier_log(prog, log_buf, log_buf_size); 7033 7034 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 7035 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); 7036 pr_perm_msg(ret); 7037 7038 if (own_log_buf && log_buf && log_buf[0] != '\0') { 7039 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", 7040 prog->name, log_buf); 7041 } 7042 7043 out: 7044 if (own_log_buf) 7045 free(log_buf); 7046 return ret; 7047 } 7048 7049 static char *find_prev_line(char *buf, char *cur) 7050 { 7051 char *p; 7052 7053 if (cur == buf) /* end of a log buf */ 7054 return NULL; 7055 7056 p = cur - 1; 7057 while (p - 1 >= buf && *(p - 1) != '\n') 7058 p--; 7059 7060 return p; 7061 } 7062 7063 static void patch_log(char *buf, size_t buf_sz, size_t log_sz, 7064 char *orig, size_t orig_sz, const char *patch) 7065 { 7066 /* size of the remaining log content to the right from the to-be-replaced part */ 7067 size_t rem_sz = (buf + log_sz) - (orig + orig_sz); 7068 size_t patch_sz = strlen(patch); 7069 7070 if (patch_sz != orig_sz) { 7071 /* If patch line(s) are longer than original piece of verifier log, 7072 * shift log contents by (patch_sz - orig_sz) bytes to the right 7073 * starting from after to-be-replaced part of the log. 7074 * 7075 * If patch line(s) are shorter than original piece of verifier log, 7076 * shift log contents by (orig_sz - patch_sz) bytes to the left 7077 * starting from after to-be-replaced part of the log 7078 * 7079 * We need to be careful about not overflowing available 7080 * buf_sz capacity. If that's the case, we'll truncate the end 7081 * of the original log, as necessary. 
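	 *
	 * E.g., replacing a 25-byte piece with a 40-byte patch shifts the
	 * log tail right by 15 bytes, dropping up to 15 trailing bytes if
	 * the buffer has no spare capacity left.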
7082 */ 7083 if (patch_sz > orig_sz) { 7084 if (orig + patch_sz >= buf + buf_sz) { 7085 /* patch is big enough to cover remaining space completely */ 7086 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1; 7087 rem_sz = 0; 7088 } else if (patch_sz - orig_sz > buf_sz - log_sz) { 7089 /* patch causes part of remaining log to be truncated */ 7090 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz); 7091 } 7092 } 7093 /* shift remaining log to the right by calculated amount */ 7094 memmove(orig + patch_sz, orig + orig_sz, rem_sz); 7095 } 7096 7097 memcpy(orig, patch, patch_sz); 7098 } 7099 7100 static void fixup_log_failed_core_relo(struct bpf_program *prog, 7101 char *buf, size_t buf_sz, size_t log_sz, 7102 char *line1, char *line2, char *line3) 7103 { 7104 /* Expected log for failed and not properly guarded CO-RE relocation: 7105 * line1 -> 123: (85) call unknown#195896080 7106 * line2 -> invalid func unknown#195896080 7107 * line3 -> <anything else or end of buffer> 7108 * 7109 * "123" is the index of the instruction that was poisoned. We extract 7110 * instruction index to find corresponding CO-RE relocation and 7111 * replace this part of the log with more relevant information about 7112 * failed CO-RE relocation. 7113 */ 7114 const struct bpf_core_relo *relo; 7115 struct bpf_core_spec spec; 7116 char patch[512], spec_buf[256]; 7117 int insn_idx, err, spec_len; 7118 7119 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1) 7120 return; 7121 7122 relo = find_relo_core(prog, insn_idx); 7123 if (!relo) 7124 return; 7125 7126 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec); 7127 if (err) 7128 return; 7129 7130 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec); 7131 snprintf(patch, sizeof(patch), 7132 "%d: <invalid CO-RE relocation>\n" 7133 "failed to resolve CO-RE relocation %s%s\n", 7134 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : ""); 7135 7136 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); 7137 } 7138 7139 static void fixup_log_missing_map_load(struct bpf_program *prog, 7140 char *buf, size_t buf_sz, size_t log_sz, 7141 char *line1, char *line2, char *line3) 7142 { 7143 /* Expected log for failed and not properly guarded map reference: 7144 * line1 -> 123: (85) call unknown#2001000345 7145 * line2 -> invalid func unknown#2001000345 7146 * line3 -> <anything else or end of buffer> 7147 * 7148 * "123" is the index of the instruction that was poisoned. 7149 * "345" in "2001000345" is a map index in obj->maps to fetch map name. 
7150 */ 7151 struct bpf_object *obj = prog->obj; 7152 const struct bpf_map *map; 7153 int insn_idx, map_idx; 7154 char patch[128]; 7155 7156 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2) 7157 return; 7158 7159 map_idx -= POISON_LDIMM64_MAP_BASE; 7160 if (map_idx < 0 || map_idx >= obj->nr_maps) 7161 return; 7162 map = &obj->maps[map_idx]; 7163 7164 snprintf(patch, sizeof(patch), 7165 "%d: <invalid BPF map reference>\n" 7166 "BPF map '%s' is referenced but wasn't created\n", 7167 insn_idx, map->name); 7168 7169 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); 7170 } 7171 7172 static void fixup_log_missing_kfunc_call(struct bpf_program *prog, 7173 char *buf, size_t buf_sz, size_t log_sz, 7174 char *line1, char *line2, char *line3) 7175 { 7176 /* Expected log for failed and not properly guarded kfunc call: 7177 * line1 -> 123: (85) call unknown#2002000345 7178 * line2 -> invalid func unknown#2002000345 7179 * line3 -> <anything else or end of buffer> 7180 * 7181 * "123" is the index of the instruction that was poisoned. 7182 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name. 7183 */ 7184 struct bpf_object *obj = prog->obj; 7185 const struct extern_desc *ext; 7186 int insn_idx, ext_idx; 7187 char patch[128]; 7188 7189 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2) 7190 return; 7191 7192 ext_idx -= POISON_CALL_KFUNC_BASE; 7193 if (ext_idx < 0 || ext_idx >= obj->nr_extern) 7194 return; 7195 ext = &obj->externs[ext_idx]; 7196 7197 snprintf(patch, sizeof(patch), 7198 "%d: <invalid kfunc call>\n" 7199 "kfunc '%s' is referenced but wasn't resolved\n", 7200 insn_idx, ext->name); 7201 7202 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); 7203 } 7204 7205 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) 7206 { 7207 /* look for familiar error patterns in last N lines of the log */ 7208 const size_t max_last_line_cnt = 10; 7209 char *prev_line, *cur_line, *next_line; 7210 size_t log_sz; 7211 int i; 7212 7213 if (!buf) 7214 return; 7215 7216 log_sz = strlen(buf) + 1; 7217 next_line = buf + log_sz - 1; 7218 7219 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) { 7220 cur_line = find_prev_line(buf, next_line); 7221 if (!cur_line) 7222 return; 7223 7224 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { 7225 prev_line = find_prev_line(buf, cur_line); 7226 if (!prev_line) 7227 continue; 7228 7229 /* failed CO-RE relocation case */ 7230 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, 7231 prev_line, cur_line, next_line); 7232 return; 7233 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) { 7234 prev_line = find_prev_line(buf, cur_line); 7235 if (!prev_line) 7236 continue; 7237 7238 /* reference to uncreated BPF map */ 7239 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz, 7240 prev_line, cur_line, next_line); 7241 return; 7242 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) { 7243 prev_line = find_prev_line(buf, cur_line); 7244 if (!prev_line) 7245 continue; 7246 7247 /* reference to unresolved kfunc */ 7248 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz, 7249 prev_line, cur_line, next_line); 7250 return; 7251 } 7252 } 7253 } 7254 7255 static int bpf_program_record_relos(struct bpf_program *prog) 7256 { 7257 struct bpf_object *obj = prog->obj; 7258 int i; 7259 7260 for (i = 0; i < prog->nr_reloc; i++) { 7261 struct reloc_desc *relo = &prog->reloc_desc[i]; 7262 
struct extern_desc *ext = &obj->externs[relo->ext_idx]; 7263 int kind; 7264 7265 switch (relo->type) { 7266 case RELO_EXTERN_LD64: 7267 if (ext->type != EXT_KSYM) 7268 continue; 7269 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ? 7270 BTF_KIND_VAR : BTF_KIND_FUNC; 7271 bpf_gen__record_extern(obj->gen_loader, ext->name, 7272 ext->is_weak, !ext->ksym.type_id, 7273 true, kind, relo->insn_idx); 7274 break; 7275 case RELO_EXTERN_CALL: 7276 bpf_gen__record_extern(obj->gen_loader, ext->name, 7277 ext->is_weak, false, false, BTF_KIND_FUNC, 7278 relo->insn_idx); 7279 break; 7280 case RELO_CORE: { 7281 struct bpf_core_relo cr = { 7282 .insn_off = relo->insn_idx * 8, 7283 .type_id = relo->core_relo->type_id, 7284 .access_str_off = relo->core_relo->access_str_off, 7285 .kind = relo->core_relo->kind, 7286 }; 7287 7288 bpf_gen__record_relo_core(obj->gen_loader, &cr); 7289 break; 7290 } 7291 default: 7292 continue; 7293 } 7294 } 7295 return 0; 7296 } 7297 7298 static int 7299 bpf_object__load_progs(struct bpf_object *obj, int log_level) 7300 { 7301 struct bpf_program *prog; 7302 size_t i; 7303 int err; 7304 7305 for (i = 0; i < obj->nr_programs; i++) { 7306 prog = &obj->programs[i]; 7307 err = bpf_object__sanitize_prog(obj, prog); 7308 if (err) 7309 return err; 7310 } 7311 7312 for (i = 0; i < obj->nr_programs; i++) { 7313 prog = &obj->programs[i]; 7314 if (prog_is_subprog(obj, prog)) 7315 continue; 7316 if (!prog->autoload) { 7317 pr_debug("prog '%s': skipped loading\n", prog->name); 7318 continue; 7319 } 7320 prog->log_level |= log_level; 7321 7322 if (obj->gen_loader) 7323 bpf_program_record_relos(prog); 7324 7325 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt, 7326 obj->license, obj->kern_version, &prog->fd); 7327 if (err) { 7328 pr_warn("prog '%s': failed to load: %d\n", prog->name, err); 7329 return err; 7330 } 7331 } 7332 7333 bpf_object__free_relocs(obj); 7334 return 0; 7335 } 7336 7337 static const struct bpf_sec_def *find_sec_def(const char *sec_name); 7338 7339 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) 7340 { 7341 struct bpf_program *prog; 7342 int err; 7343 7344 bpf_object__for_each_program(prog, obj) { 7345 prog->sec_def = find_sec_def(prog->sec_name); 7346 if (!prog->sec_def) { 7347 /* couldn't guess, but user might manually specify */ 7348 pr_debug("prog '%s': unrecognized ELF section name '%s'\n", 7349 prog->name, prog->sec_name); 7350 continue; 7351 } 7352 7353 prog->type = prog->sec_def->prog_type; 7354 prog->expected_attach_type = prog->sec_def->expected_attach_type; 7355 7356 /* sec_def can have custom callback which should be called 7357 * after bpf_program is initialized to adjust its properties 7358 */ 7359 if (prog->sec_def->prog_setup_fn) { 7360 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); 7361 if (err < 0) { 7362 pr_warn("prog '%s': failed to initialize: %d\n", 7363 prog->name, err); 7364 return err; 7365 } 7366 } 7367 } 7368 7369 return 0; 7370 } 7371 7372 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, 7373 const struct bpf_object_open_opts *opts) 7374 { 7375 const char *obj_name, *kconfig, *btf_tmp_path; 7376 struct bpf_object *obj; 7377 char tmp_name[64]; 7378 int err; 7379 char *log_buf; 7380 size_t log_size; 7381 __u32 log_level; 7382 7383 if (elf_version(EV_CURRENT) == EV_NONE) { 7384 pr_warn("failed to init libelf for %s\n", 7385 path ? 
: "(mem buf)"); 7386 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); 7387 } 7388 7389 if (!OPTS_VALID(opts, bpf_object_open_opts)) 7390 return ERR_PTR(-EINVAL); 7391 7392 obj_name = OPTS_GET(opts, object_name, NULL); 7393 if (obj_buf) { 7394 if (!obj_name) { 7395 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 7396 (unsigned long)obj_buf, 7397 (unsigned long)obj_buf_sz); 7398 obj_name = tmp_name; 7399 } 7400 path = obj_name; 7401 pr_debug("loading object '%s' from buffer\n", obj_name); 7402 } 7403 7404 log_buf = OPTS_GET(opts, kernel_log_buf, NULL); 7405 log_size = OPTS_GET(opts, kernel_log_size, 0); 7406 log_level = OPTS_GET(opts, kernel_log_level, 0); 7407 if (log_size > UINT_MAX) 7408 return ERR_PTR(-EINVAL); 7409 if (log_size && !log_buf) 7410 return ERR_PTR(-EINVAL); 7411 7412 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); 7413 if (IS_ERR(obj)) 7414 return obj; 7415 7416 obj->log_buf = log_buf; 7417 obj->log_size = log_size; 7418 obj->log_level = log_level; 7419 7420 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); 7421 if (btf_tmp_path) { 7422 if (strlen(btf_tmp_path) >= PATH_MAX) { 7423 err = -ENAMETOOLONG; 7424 goto out; 7425 } 7426 obj->btf_custom_path = strdup(btf_tmp_path); 7427 if (!obj->btf_custom_path) { 7428 err = -ENOMEM; 7429 goto out; 7430 } 7431 } 7432 7433 kconfig = OPTS_GET(opts, kconfig, NULL); 7434 if (kconfig) { 7435 obj->kconfig = strdup(kconfig); 7436 if (!obj->kconfig) { 7437 err = -ENOMEM; 7438 goto out; 7439 } 7440 } 7441 7442 err = bpf_object__elf_init(obj); 7443 err = err ? : bpf_object__check_endianness(obj); 7444 err = err ? : bpf_object__elf_collect(obj); 7445 err = err ? : bpf_object__collect_externs(obj); 7446 err = err ? : bpf_object_fixup_btf(obj); 7447 err = err ? : bpf_object__init_maps(obj, opts); 7448 err = err ? : bpf_object_init_progs(obj, opts); 7449 err = err ? 
: bpf_object__collect_relos(obj); 7450 if (err) 7451 goto out; 7452 7453 bpf_object__elf_finish(obj); 7454 7455 return obj; 7456 out: 7457 bpf_object__close(obj); 7458 return ERR_PTR(err); 7459 } 7460 7461 struct bpf_object * 7462 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) 7463 { 7464 if (!path) 7465 return libbpf_err_ptr(-EINVAL); 7466 7467 pr_debug("loading %s\n", path); 7468 7469 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); 7470 } 7471 7472 struct bpf_object *bpf_object__open(const char *path) 7473 { 7474 return bpf_object__open_file(path, NULL); 7475 } 7476 7477 struct bpf_object * 7478 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, 7479 const struct bpf_object_open_opts *opts) 7480 { 7481 if (!obj_buf || obj_buf_sz == 0) 7482 return libbpf_err_ptr(-EINVAL); 7483 7484 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); 7485 } 7486 7487 static int bpf_object_unload(struct bpf_object *obj) 7488 { 7489 size_t i; 7490 7491 if (!obj) 7492 return libbpf_err(-EINVAL); 7493 7494 for (i = 0; i < obj->nr_maps; i++) { 7495 zclose(obj->maps[i].fd); 7496 if (obj->maps[i].st_ops) 7497 zfree(&obj->maps[i].st_ops->kern_vdata); 7498 } 7499 7500 for (i = 0; i < obj->nr_programs; i++) 7501 bpf_program__unload(&obj->programs[i]); 7502 7503 return 0; 7504 } 7505 7506 static int bpf_object__sanitize_maps(struct bpf_object *obj) 7507 { 7508 struct bpf_map *m; 7509 7510 bpf_object__for_each_map(m, obj) { 7511 if (!bpf_map__is_internal(m)) 7512 continue; 7513 if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) 7514 m->def.map_flags &= ~BPF_F_MMAPABLE; 7515 } 7516 7517 return 0; 7518 } 7519 7520 int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) 7521 { 7522 char sym_type, sym_name[500]; 7523 unsigned long long sym_addr; 7524 int ret, err = 0; 7525 FILE *f; 7526 7527 f = fopen("/proc/kallsyms", "re"); 7528 if (!f) { 7529 err = -errno; 7530 pr_warn("failed to open /proc/kallsyms: %d\n", err); 7531 return err; 7532 } 7533 7534 while (true) { 7535 ret = fscanf(f, "%llx %c %499s%*[^\n]\n", 7536 &sym_addr, &sym_type, sym_name); 7537 if (ret == EOF && feof(f)) 7538 break; 7539 if (ret != 3) { 7540 pr_warn("failed to read kallsyms entry: %d\n", ret); 7541 err = -EINVAL; 7542 break; 7543 } 7544 7545 err = cb(sym_addr, sym_type, sym_name, ctx); 7546 if (err) 7547 break; 7548 } 7549 7550 fclose(f); 7551 return err; 7552 } 7553 7554 static int kallsyms_cb(unsigned long long sym_addr, char sym_type, 7555 const char *sym_name, void *ctx) 7556 { 7557 struct bpf_object *obj = ctx; 7558 const struct btf_type *t; 7559 struct extern_desc *ext; 7560 7561 ext = find_extern_by_name(obj, sym_name); 7562 if (!ext || ext->type != EXT_KSYM) 7563 return 0; 7564 7565 t = btf__type_by_id(obj->btf, ext->btf_id); 7566 if (!btf_is_var(t)) 7567 return 0; 7568 7569 if (ext->is_set && ext->ksym.addr != sym_addr) { 7570 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n", 7571 sym_name, ext->ksym.addr, sym_addr); 7572 return -EINVAL; 7573 } 7574 if (!ext->is_set) { 7575 ext->is_set = true; 7576 ext->ksym.addr = sym_addr; 7577 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr); 7578 } 7579 return 0; 7580 } 7581 7582 static int bpf_object__read_kallsyms_file(struct bpf_object *obj) 7583 { 7584 return libbpf_kallsyms_parse(kallsyms_cb, obj); 7585 } 7586 7587 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, 7588 __u16 kind, struct btf **res_btf, 7589 struct module_btf **res_mod_btf) 7590 { 7591 struct 
module_btf *mod_btf; 7592 struct btf *btf; 7593 int i, id, err; 7594 7595 btf = obj->btf_vmlinux; 7596 mod_btf = NULL; 7597 id = btf__find_by_name_kind(btf, ksym_name, kind); 7598 7599 if (id == -ENOENT) { 7600 err = load_module_btfs(obj); 7601 if (err) 7602 return err; 7603 7604 for (i = 0; i < obj->btf_module_cnt; i++) { 7605 /* we assume module_btf's BTF FD is always >0 */ 7606 mod_btf = &obj->btf_modules[i]; 7607 btf = mod_btf->btf; 7608 id = btf__find_by_name_kind_own(btf, ksym_name, kind); 7609 if (id != -ENOENT) 7610 break; 7611 } 7612 } 7613 if (id <= 0) 7614 return -ESRCH; 7615 7616 *res_btf = btf; 7617 *res_mod_btf = mod_btf; 7618 return id; 7619 } 7620 7621 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, 7622 struct extern_desc *ext) 7623 { 7624 const struct btf_type *targ_var, *targ_type; 7625 __u32 targ_type_id, local_type_id; 7626 struct module_btf *mod_btf = NULL; 7627 const char *targ_var_name; 7628 struct btf *btf = NULL; 7629 int id, err; 7630 7631 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); 7632 if (id < 0) { 7633 if (id == -ESRCH && ext->is_weak) 7634 return 0; 7635 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", 7636 ext->name); 7637 return id; 7638 } 7639 7640 /* find local type_id */ 7641 local_type_id = ext->ksym.type_id; 7642 7643 /* find target type_id */ 7644 targ_var = btf__type_by_id(btf, id); 7645 targ_var_name = btf__name_by_offset(btf, targ_var->name_off); 7646 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); 7647 7648 err = bpf_core_types_are_compat(obj->btf, local_type_id, 7649 btf, targ_type_id); 7650 if (err <= 0) { 7651 const struct btf_type *local_type; 7652 const char *targ_name, *local_name; 7653 7654 local_type = btf__type_by_id(obj->btf, local_type_id); 7655 local_name = btf__name_by_offset(obj->btf, local_type->name_off); 7656 targ_name = btf__name_by_offset(btf, targ_type->name_off); 7657 7658 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", 7659 ext->name, local_type_id, 7660 btf_kind_str(local_type), local_name, targ_type_id, 7661 btf_kind_str(targ_type), targ_name); 7662 return -EINVAL; 7663 } 7664 7665 ext->is_set = true; 7666 ext->ksym.kernel_btf_obj_fd = mod_btf ? 
mod_btf->fd : 0; 7667 ext->ksym.kernel_btf_id = id; 7668 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", 7669 ext->name, id, btf_kind_str(targ_var), targ_var_name); 7670 7671 return 0; 7672 } 7673 7674 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, 7675 struct extern_desc *ext) 7676 { 7677 int local_func_proto_id, kfunc_proto_id, kfunc_id; 7678 struct module_btf *mod_btf = NULL; 7679 const struct btf_type *kern_func; 7680 struct btf *kern_btf = NULL; 7681 int ret; 7682 7683 local_func_proto_id = ext->ksym.type_id; 7684 7685 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, 7686 &mod_btf); 7687 if (kfunc_id < 0) { 7688 if (kfunc_id == -ESRCH && ext->is_weak) 7689 return 0; 7690 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", 7691 ext->name); 7692 return kfunc_id; 7693 } 7694 7695 kern_func = btf__type_by_id(kern_btf, kfunc_id); 7696 kfunc_proto_id = kern_func->type; 7697 7698 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, 7699 kern_btf, kfunc_proto_id); 7700 if (ret <= 0) { 7701 if (ext->is_weak) 7702 return 0; 7703 7704 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", 7705 ext->name, local_func_proto_id, 7706 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id); 7707 return -EINVAL; 7708 } 7709 7710 /* set index for module BTF fd in fd_array, if unset */ 7711 if (mod_btf && !mod_btf->fd_array_idx) { 7712 /* insn->off is s16 */ 7713 if (obj->fd_array_cnt == INT16_MAX) { 7714 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", 7715 ext->name, mod_btf->fd_array_idx); 7716 return -E2BIG; 7717 } 7718 /* Cannot use index 0 for module BTF fd */ 7719 if (!obj->fd_array_cnt) 7720 obj->fd_array_cnt = 1; 7721 7722 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), 7723 obj->fd_array_cnt + 1); 7724 if (ret) 7725 return ret; 7726 mod_btf->fd_array_idx = obj->fd_array_cnt; 7727 /* we assume module BTF FD is always >0 */ 7728 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; 7729 } 7730 7731 ext->is_set = true; 7732 ext->ksym.kernel_btf_id = kfunc_id; 7733 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0; 7734 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data() 7735 * populates FD into ld_imm64 insn when it's used to point to kfunc. 7736 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call. 7737 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. 7738 */ 7739 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0; 7740 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n", 7741 ext->name, mod_btf ? 
mod_btf->name : "vmlinux", kfunc_id); 7742 7743 return 0; 7744 } 7745 7746 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) 7747 { 7748 const struct btf_type *t; 7749 struct extern_desc *ext; 7750 int i, err; 7751 7752 for (i = 0; i < obj->nr_extern; i++) { 7753 ext = &obj->externs[i]; 7754 if (ext->type != EXT_KSYM || !ext->ksym.type_id) 7755 continue; 7756 7757 if (obj->gen_loader) { 7758 ext->is_set = true; 7759 ext->ksym.kernel_btf_obj_fd = 0; 7760 ext->ksym.kernel_btf_id = 0; 7761 continue; 7762 } 7763 t = btf__type_by_id(obj->btf, ext->btf_id); 7764 if (btf_is_var(t)) 7765 err = bpf_object__resolve_ksym_var_btf_id(obj, ext); 7766 else 7767 err = bpf_object__resolve_ksym_func_btf_id(obj, ext); 7768 if (err) 7769 return err; 7770 } 7771 return 0; 7772 } 7773 7774 static int bpf_object__resolve_externs(struct bpf_object *obj, 7775 const char *extra_kconfig) 7776 { 7777 bool need_config = false, need_kallsyms = false; 7778 bool need_vmlinux_btf = false; 7779 struct extern_desc *ext; 7780 void *kcfg_data = NULL; 7781 int err, i; 7782 7783 if (obj->nr_extern == 0) 7784 return 0; 7785 7786 if (obj->kconfig_map_idx >= 0) 7787 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; 7788 7789 for (i = 0; i < obj->nr_extern; i++) { 7790 ext = &obj->externs[i]; 7791 7792 if (ext->type == EXT_KSYM) { 7793 if (ext->ksym.type_id) 7794 need_vmlinux_btf = true; 7795 else 7796 need_kallsyms = true; 7797 continue; 7798 } else if (ext->type == EXT_KCFG) { 7799 void *ext_ptr = kcfg_data + ext->kcfg.data_off; 7800 __u64 value = 0; 7801 7802 /* Kconfig externs need actual /proc/config.gz */ 7803 if (str_has_pfx(ext->name, "CONFIG_")) { 7804 need_config = true; 7805 continue; 7806 } 7807 7808 /* Virtual kcfg externs are handled specially by libbpf */ 7809 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { 7810 value = get_kernel_version(); 7811 if (!value) { 7812 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name); 7813 return -EINVAL; 7814 } 7815 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) { 7816 value = kernel_supports(obj, FEAT_BPF_COOKIE); 7817 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) { 7818 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER); 7819 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) { 7820 /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed 7821 * __kconfig externs, where LINUX_ ones are virtual and filled out 7822 * by libbpf itself (their values don't come from Kconfig). 7823 * If a LINUX_xxx variable is not recognized by libbpf, but is marked 7824 * __weak, it defaults to zero, just like CONFIG_xxx 7825 * externs.
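 *
 * For example, the BPF-program side declares such externs roughly like
 * this (an illustrative sketch; __kconfig comes from bpf_helpers.h and
 * places the variable into the special ".kconfig" section):
 *
 *	extern unsigned long long LINUX_KERNEL_VERSION __kconfig;
 *	extern int CONFIG_HZ __kconfig;
 *	extern bool CONFIG_PREEMPT __kconfig __weak;
 *
 * Strong CONFIG_xxx externs must be resolvable from Kconfig data,
 * while __weak ones silently default to zero when missing.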
7826 */ 7827 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name); 7828 return -EINVAL; 7829 } 7830 7831 err = set_kcfg_value_num(ext, ext_ptr, value); 7832 if (err) 7833 return err; 7834 pr_debug("extern (kcfg) '%s': set to 0x%llx\n", 7835 ext->name, (long long)value); 7836 } else { 7837 pr_warn("extern '%s': unrecognized extern kind\n", ext->name); 7838 return -EINVAL; 7839 } 7840 } 7841 if (need_config && extra_kconfig) { 7842 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); 7843 if (err) 7844 return -EINVAL; 7845 need_config = false; 7846 for (i = 0; i < obj->nr_extern; i++) { 7847 ext = &obj->externs[i]; 7848 if (ext->type == EXT_KCFG && !ext->is_set) { 7849 need_config = true; 7850 break; 7851 } 7852 } 7853 } 7854 if (need_config) { 7855 err = bpf_object__read_kconfig_file(obj, kcfg_data); 7856 if (err) 7857 return -EINVAL; 7858 } 7859 if (need_kallsyms) { 7860 err = bpf_object__read_kallsyms_file(obj); 7861 if (err) 7862 return -EINVAL; 7863 } 7864 if (need_vmlinux_btf) { 7865 err = bpf_object__resolve_ksyms_btf_id(obj); 7866 if (err) 7867 return -EINVAL; 7868 } 7869 for (i = 0; i < obj->nr_extern; i++) { 7870 ext = &obj->externs[i]; 7871 7872 if (!ext->is_set && !ext->is_weak) { 7873 pr_warn("extern '%s' (strong): not resolved\n", ext->name); 7874 return -ESRCH; 7875 } else if (!ext->is_set) { 7876 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n", 7877 ext->name); 7878 } 7879 } 7880 7881 return 0; 7882 } 7883 7884 static void bpf_map_prepare_vdata(const struct bpf_map *map) 7885 { 7886 struct bpf_struct_ops *st_ops; 7887 __u32 i; 7888 7889 st_ops = map->st_ops; 7890 for (i = 0; i < btf_vlen(st_ops->type); i++) { 7891 struct bpf_program *prog = st_ops->progs[i]; 7892 void *kern_data; 7893 int prog_fd; 7894 7895 if (!prog) 7896 continue; 7897 7898 prog_fd = bpf_program__fd(prog); 7899 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; 7900 *(unsigned long *)kern_data = prog_fd; 7901 } 7902 } 7903 7904 static int bpf_object_prepare_struct_ops(struct bpf_object *obj) 7905 { 7906 int i; 7907 7908 for (i = 0; i < obj->nr_maps; i++) 7909 if (bpf_map__is_struct_ops(&obj->maps[i])) 7910 bpf_map_prepare_vdata(&obj->maps[i]); 7911 7912 return 0; 7913 } 7914 7915 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) 7916 { 7917 int err, i; 7918 7919 if (!obj) 7920 return libbpf_err(-EINVAL); 7921 7922 if (obj->loaded) { 7923 pr_warn("object '%s': load can't be attempted twice\n", obj->name); 7924 return libbpf_err(-EINVAL); 7925 } 7926 7927 if (obj->gen_loader) 7928 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); 7929 7930 err = bpf_object__probe_loading(obj); 7931 err = err ? : bpf_object__load_vmlinux_btf(obj, false); 7932 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); 7933 err = err ? : bpf_object__sanitize_and_load_btf(obj); 7934 err = err ? : bpf_object__sanitize_maps(obj); 7935 err = err ? : bpf_object__init_kern_struct_ops_maps(obj); 7936 err = err ? : bpf_object__create_maps(obj); 7937 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); 7938 err = err ? : bpf_object__load_progs(obj, extra_log_level); 7939 err = err ? : bpf_object_init_prog_arrays(obj); 7940 err = err ? 
: bpf_object_prepare_struct_ops(obj); 7941 7942 if (obj->gen_loader) { 7943 /* reset FDs */ 7944 if (obj->btf) 7945 btf__set_fd(obj->btf, -1); 7946 for (i = 0; i < obj->nr_maps; i++) 7947 obj->maps[i].fd = -1; 7948 if (!err) 7949 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); 7950 } 7951 7952 /* clean up fd_array */ 7953 zfree(&obj->fd_array); 7954 7955 /* clean up module BTFs */ 7956 for (i = 0; i < obj->btf_module_cnt; i++) { 7957 close(obj->btf_modules[i].fd); 7958 btf__free(obj->btf_modules[i].btf); 7959 free(obj->btf_modules[i].name); 7960 } 7961 free(obj->btf_modules); 7962 7963 /* clean up vmlinux BTF */ 7964 btf__free(obj->btf_vmlinux); 7965 obj->btf_vmlinux = NULL; 7966 7967 obj->loaded = true; /* doesn't matter if successfully or not */ 7968 7969 if (err) 7970 goto out; 7971 7972 return 0; 7973 out: 7974 /* unpin any maps that were auto-pinned during load */ 7975 for (i = 0; i < obj->nr_maps; i++) 7976 if (obj->maps[i].pinned && !obj->maps[i].reused) 7977 bpf_map__unpin(&obj->maps[i], NULL); 7978 7979 bpf_object_unload(obj); 7980 pr_warn("failed to load object '%s'\n", obj->path); 7981 return libbpf_err(err); 7982 } 7983 7984 int bpf_object__load(struct bpf_object *obj) 7985 { 7986 return bpf_object_load(obj, 0, NULL); 7987 } 7988 7989 static int make_parent_dir(const char *path) 7990 { 7991 char *cp, errmsg[STRERR_BUFSIZE]; 7992 char *dname, *dir; 7993 int err = 0; 7994 7995 dname = strdup(path); 7996 if (dname == NULL) 7997 return -ENOMEM; 7998 7999 dir = dirname(dname); 8000 if (mkdir(dir, 0700) && errno != EEXIST) 8001 err = -errno; 8002 8003 free(dname); 8004 if (err) { 8005 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 8006 pr_warn("failed to mkdir %s: %s\n", path, cp); 8007 } 8008 return err; 8009 } 8010 8011 static int check_path(const char *path) 8012 { 8013 char *cp, errmsg[STRERR_BUFSIZE]; 8014 struct statfs st_fs; 8015 char *dname, *dir; 8016 int err = 0; 8017 8018 if (path == NULL) 8019 return -EINVAL; 8020 8021 dname = strdup(path); 8022 if (dname == NULL) 8023 return -ENOMEM; 8024 8025 dir = dirname(dname); 8026 if (statfs(dir, &st_fs)) { 8027 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 8028 pr_warn("failed to statfs %s: %s\n", dir, cp); 8029 err = -errno; 8030 } 8031 free(dname); 8032 8033 if (!err && st_fs.f_type != BPF_FS_MAGIC) { 8034 pr_warn("specified path %s is not on BPF FS\n", path); 8035 err = -EINVAL; 8036 } 8037 8038 return err; 8039 } 8040 8041 int bpf_program__pin(struct bpf_program *prog, const char *path) 8042 { 8043 char *cp, errmsg[STRERR_BUFSIZE]; 8044 int err; 8045 8046 if (prog->fd < 0) { 8047 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name); 8048 return libbpf_err(-EINVAL); 8049 } 8050 8051 err = make_parent_dir(path); 8052 if (err) 8053 return libbpf_err(err); 8054 8055 err = check_path(path); 8056 if (err) 8057 return libbpf_err(err); 8058 8059 if (bpf_obj_pin(prog->fd, path)) { 8060 err = -errno; 8061 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); 8062 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp); 8063 return libbpf_err(err); 8064 } 8065 8066 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path); 8067 return 0; 8068 } 8069 8070 int bpf_program__unpin(struct bpf_program *prog, const char *path) 8071 { 8072 int err; 8073 8074 if (prog->fd < 0) { 8075 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name); 8076 return libbpf_err(-EINVAL); 8077 } 8078 8079 err = check_path(path); 8080 if (err) 8081 return libbpf_err(err); 
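/* On bpffs, a pin is just a directory entry holding a reference to the
 * program; unlink()-ing it below drops the pin, while the program
 * itself stays loaded as long as any other FD or attachment still
 * references it.
 */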
8082 8083 err = unlink(path); 8084 if (err) 8085 return libbpf_err(-errno); 8086 8087 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path); 8088 return 0; 8089 } 8090 8091 int bpf_map__pin(struct bpf_map *map, const char *path) 8092 { 8093 char *cp, errmsg[STRERR_BUFSIZE]; 8094 int err; 8095 8096 if (map == NULL) { 8097 pr_warn("invalid map pointer\n"); 8098 return libbpf_err(-EINVAL); 8099 } 8100 8101 if (map->pin_path) { 8102 if (path && strcmp(path, map->pin_path)) { 8103 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 8104 bpf_map__name(map), map->pin_path, path); 8105 return libbpf_err(-EINVAL); 8106 } else if (map->pinned) { 8107 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", 8108 bpf_map__name(map), map->pin_path); 8109 return 0; 8110 } 8111 } else { 8112 if (!path) { 8113 pr_warn("missing a path to pin map '%s' at\n", 8114 bpf_map__name(map)); 8115 return libbpf_err(-EINVAL); 8116 } else if (map->pinned) { 8117 pr_warn("map '%s' already pinned\n", bpf_map__name(map)); 8118 return libbpf_err(-EEXIST); 8119 } 8120 8121 map->pin_path = strdup(path); 8122 if (!map->pin_path) { 8123 err = -errno; 8124 goto out_err; 8125 } 8126 } 8127 8128 err = make_parent_dir(map->pin_path); 8129 if (err) 8130 return libbpf_err(err); 8131 8132 err = check_path(map->pin_path); 8133 if (err) 8134 return libbpf_err(err); 8135 8136 if (bpf_obj_pin(map->fd, map->pin_path)) { 8137 err = -errno; 8138 goto out_err; 8139 } 8140 8141 map->pinned = true; 8142 pr_debug("pinned map '%s'\n", map->pin_path); 8143 8144 return 0; 8145 8146 out_err: 8147 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); 8148 pr_warn("failed to pin map: %s\n", cp); 8149 return libbpf_err(err); 8150 } 8151 8152 int bpf_map__unpin(struct bpf_map *map, const char *path) 8153 { 8154 int err; 8155 8156 if (map == NULL) { 8157 pr_warn("invalid map pointer\n"); 8158 return libbpf_err(-EINVAL); 8159 } 8160 8161 if (map->pin_path) { 8162 if (path && strcmp(path, map->pin_path)) { 8163 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", 8164 bpf_map__name(map), map->pin_path, path); 8165 return libbpf_err(-EINVAL); 8166 } 8167 path = map->pin_path; 8168 } else if (!path) { 8169 pr_warn("no path to unpin map '%s' from\n", 8170 bpf_map__name(map)); 8171 return libbpf_err(-EINVAL); 8172 } 8173 8174 err = check_path(path); 8175 if (err) 8176 return libbpf_err(err); 8177 8178 err = unlink(path); 8179 if (err != 0) 8180 return libbpf_err(-errno); 8181 8182 map->pinned = false; 8183 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); 8184 8185 return 0; 8186 } 8187 8188 int bpf_map__set_pin_path(struct bpf_map *map, const char *path) 8189 { 8190 char *new = NULL; 8191 8192 if (path) { 8193 new = strdup(path); 8194 if (!new) 8195 return libbpf_err(-errno); 8196 } 8197 8198 free(map->pin_path); 8199 map->pin_path = new; 8200 return 0; 8201 } 8202 8203 __alias(bpf_map__pin_path) 8204 const char *bpf_map__get_pin_path(const struct bpf_map *map); 8205 8206 const char *bpf_map__pin_path(const struct bpf_map *map) 8207 { 8208 return map->pin_path; 8209 } 8210 8211 bool bpf_map__is_pinned(const struct bpf_map *map) 8212 { 8213 return map->pinned; 8214 } 8215 8216 static void sanitize_pin_path(char *s) 8217 { 8218 /* bpffs disallows periods in path names */ 8219 while (*s) { 8220 if (*s == '.') 8221 *s = '_'; 8222 s++; 8223 } 8224 } 8225 8226 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) 8227 { 8228 struct bpf_map *map; 8229 int err; 8230 8231 if (!obj) 8232 
return libbpf_err(-ENOENT); 8233 8234 if (!obj->loaded) { 8235 pr_warn("object not yet loaded; load it first\n"); 8236 return libbpf_err(-ENOENT); 8237 } 8238 8239 bpf_object__for_each_map(map, obj) { 8240 char *pin_path = NULL; 8241 char buf[PATH_MAX]; 8242 8243 if (!map->autocreate) 8244 continue; 8245 8246 if (path) { 8247 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); 8248 if (err) 8249 goto err_unpin_maps; 8250 sanitize_pin_path(buf); 8251 pin_path = buf; 8252 } else if (!map->pin_path) { 8253 continue; 8254 } 8255 8256 err = bpf_map__pin(map, pin_path); 8257 if (err) 8258 goto err_unpin_maps; 8259 } 8260 8261 return 0; 8262 8263 err_unpin_maps: 8264 while ((map = bpf_object__prev_map(obj, map))) { 8265 if (!map->pin_path) 8266 continue; 8267 8268 bpf_map__unpin(map, NULL); 8269 } 8270 8271 return libbpf_err(err); 8272 } 8273 8274 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) 8275 { 8276 struct bpf_map *map; 8277 int err; 8278 8279 if (!obj) 8280 return libbpf_err(-ENOENT); 8281 8282 bpf_object__for_each_map(map, obj) { 8283 char *pin_path = NULL; 8284 char buf[PATH_MAX]; 8285 8286 if (path) { 8287 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map)); 8288 if (err) 8289 return libbpf_err(err); 8290 sanitize_pin_path(buf); 8291 pin_path = buf; 8292 } else if (!map->pin_path) { 8293 continue; 8294 } 8295 8296 err = bpf_map__unpin(map, pin_path); 8297 if (err) 8298 return libbpf_err(err); 8299 } 8300 8301 return 0; 8302 } 8303 8304 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) 8305 { 8306 struct bpf_program *prog; 8307 char buf[PATH_MAX]; 8308 int err; 8309 8310 if (!obj) 8311 return libbpf_err(-ENOENT); 8312 8313 if (!obj->loaded) { 8314 pr_warn("object not yet loaded; load it first\n"); 8315 return libbpf_err(-ENOENT); 8316 } 8317 8318 bpf_object__for_each_program(prog, obj) { 8319 err = pathname_concat(buf, sizeof(buf), path, prog->name); 8320 if (err) 8321 goto err_unpin_programs; 8322 8323 err = bpf_program__pin(prog, buf); 8324 if (err) 8325 goto err_unpin_programs; 8326 } 8327 8328 return 0; 8329 8330 err_unpin_programs: 8331 while ((prog = bpf_object__prev_program(obj, prog))) { 8332 if (pathname_concat(buf, sizeof(buf), path, prog->name)) 8333 continue; 8334 8335 bpf_program__unpin(prog, buf); 8336 } 8337 8338 return libbpf_err(err); 8339 } 8340 8341 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) 8342 { 8343 struct bpf_program *prog; 8344 int err; 8345 8346 if (!obj) 8347 return libbpf_err(-ENOENT); 8348 8349 bpf_object__for_each_program(prog, obj) { 8350 char buf[PATH_MAX]; 8351 8352 err = pathname_concat(buf, sizeof(buf), path, prog->name); 8353 if (err) 8354 return libbpf_err(err); 8355 8356 err = bpf_program__unpin(prog, buf); 8357 if (err) 8358 return libbpf_err(err); 8359 } 8360 8361 return 0; 8362 } 8363 8364 int bpf_object__pin(struct bpf_object *obj, const char *path) 8365 { 8366 int err; 8367 8368 err = bpf_object__pin_maps(obj, path); 8369 if (err) 8370 return libbpf_err(err); 8371 8372 err = bpf_object__pin_programs(obj, path); 8373 if (err) { 8374 bpf_object__unpin_maps(obj, path); 8375 return libbpf_err(err); 8376 } 8377 8378 return 0; 8379 } 8380 8381 int bpf_object__unpin(struct bpf_object *obj, const char *path) 8382 { 8383 int err; 8384 8385 err = bpf_object__unpin_programs(obj, path); 8386 if (err) 8387 return libbpf_err(err); 8388 8389 err = bpf_object__unpin_maps(obj, path); 8390 if (err) 8391 return libbpf_err(err); 8392 8393 return 0; 8394 } 8395 8396 
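/* A minimal usage sketch for the pinning API above (illustrative only;
 * assumes bpffs is mounted at /sys/fs/bpf and abbreviates error
 * handling):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *
 *	if (!obj || bpf_object__load(obj))
 *		return -1;
 *	if (bpf_object__pin(obj, "/sys/fs/bpf/myprog"))
 *		return -1;
 *	(... pinned maps and programs are now visible in bpffs ...)
 *	bpf_object__unpin(obj, "/sys/fs/bpf/myprog");
 *	bpf_object__close(obj);
 */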
static void bpf_map__destroy(struct bpf_map *map) 8397 { 8398 if (map->inner_map) { 8399 bpf_map__destroy(map->inner_map); 8400 zfree(&map->inner_map); 8401 } 8402 8403 zfree(&map->init_slots); 8404 map->init_slots_sz = 0; 8405 8406 if (map->mmaped) { 8407 size_t mmap_sz; 8408 8409 mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries); 8410 munmap(map->mmaped, mmap_sz); 8411 map->mmaped = NULL; 8412 } 8413 8414 if (map->st_ops) { 8415 zfree(&map->st_ops->data); 8416 zfree(&map->st_ops->progs); 8417 zfree(&map->st_ops->kern_func_off); 8418 zfree(&map->st_ops); 8419 } 8420 8421 zfree(&map->name); 8422 zfree(&map->real_name); 8423 zfree(&map->pin_path); 8424 8425 if (map->fd >= 0) 8426 zclose(map->fd); 8427 } 8428 8429 void bpf_object__close(struct bpf_object *obj) 8430 { 8431 size_t i; 8432 8433 if (IS_ERR_OR_NULL(obj)) 8434 return; 8435 8436 usdt_manager_free(obj->usdt_man); 8437 obj->usdt_man = NULL; 8438 8439 bpf_gen__free(obj->gen_loader); 8440 bpf_object__elf_finish(obj); 8441 bpf_object_unload(obj); 8442 btf__free(obj->btf); 8443 btf__free(obj->btf_vmlinux); 8444 btf_ext__free(obj->btf_ext); 8445 8446 for (i = 0; i < obj->nr_maps; i++) 8447 bpf_map__destroy(&obj->maps[i]); 8448 8449 zfree(&obj->btf_custom_path); 8450 zfree(&obj->kconfig); 8451 8452 for (i = 0; i < obj->nr_extern; i++) 8453 zfree(&obj->externs[i].essent_name); 8454 8455 zfree(&obj->externs); 8456 obj->nr_extern = 0; 8457 8458 zfree(&obj->maps); 8459 obj->nr_maps = 0; 8460 8461 if (obj->programs && obj->nr_programs) { 8462 for (i = 0; i < obj->nr_programs; i++) 8463 bpf_program__exit(&obj->programs[i]); 8464 } 8465 zfree(&obj->programs); 8466 8467 free(obj); 8468 } 8469 8470 const char *bpf_object__name(const struct bpf_object *obj) 8471 { 8472 return obj ? obj->name : libbpf_err_ptr(-EINVAL); 8473 } 8474 8475 unsigned int bpf_object__kversion(const struct bpf_object *obj) 8476 { 8477 return obj ? obj->kern_version : 0; 8478 } 8479 8480 struct btf *bpf_object__btf(const struct bpf_object *obj) 8481 { 8482 return obj ? obj->btf : NULL; 8483 } 8484 8485 int bpf_object__btf_fd(const struct bpf_object *obj) 8486 { 8487 return obj->btf ? btf__fd(obj->btf) : -1; 8488 } 8489 8490 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) 8491 { 8492 if (obj->loaded) 8493 return libbpf_err(-EINVAL); 8494 8495 obj->kern_version = kern_version; 8496 8497 return 0; 8498 } 8499 8500 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) 8501 { 8502 struct bpf_gen *gen; 8503 8504 if (!opts) 8505 return -EFAULT; 8506 if (!OPTS_VALID(opts, gen_loader_opts)) 8507 return -EINVAL; 8508 gen = calloc(sizeof(*gen), 1); 8509 if (!gen) 8510 return -ENOMEM; 8511 gen->opts = opts; 8512 obj->gen_loader = gen; 8513 return 0; 8514 } 8515 8516 static struct bpf_program * 8517 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, 8518 bool forward) 8519 { 8520 size_t nr_programs = obj->nr_programs; 8521 ssize_t idx; 8522 8523 if (!nr_programs) 8524 return NULL; 8525 8526 if (!p) 8527 /* Iter from the beginning */ 8528 return forward ? &obj->programs[0] : 8529 &obj->programs[nr_programs - 1]; 8530 8531 if (p->obj != obj) { 8532 pr_warn("error: program handler doesn't match object\n"); 8533 return errno = EINVAL, NULL; 8534 } 8535 8536 idx = (p - obj->programs) + (forward ? 
1 : -1); 8537 if (idx >= obj->nr_programs || idx < 0) 8538 return NULL; 8539 return &obj->programs[idx]; 8540 } 8541 8542 struct bpf_program * 8543 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) 8544 { 8545 struct bpf_program *prog = prev; 8546 8547 do { 8548 prog = __bpf_program__iter(prog, obj, true); 8549 } while (prog && prog_is_subprog(obj, prog)); 8550 8551 return prog; 8552 } 8553 8554 struct bpf_program * 8555 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) 8556 { 8557 struct bpf_program *prog = next; 8558 8559 do { 8560 prog = __bpf_program__iter(prog, obj, false); 8561 } while (prog && prog_is_subprog(obj, prog)); 8562 8563 return prog; 8564 } 8565 8566 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) 8567 { 8568 prog->prog_ifindex = ifindex; 8569 } 8570 8571 const char *bpf_program__name(const struct bpf_program *prog) 8572 { 8573 return prog->name; 8574 } 8575 8576 const char *bpf_program__section_name(const struct bpf_program *prog) 8577 { 8578 return prog->sec_name; 8579 } 8580 8581 bool bpf_program__autoload(const struct bpf_program *prog) 8582 { 8583 return prog->autoload; 8584 } 8585 8586 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) 8587 { 8588 if (prog->obj->loaded) 8589 return libbpf_err(-EINVAL); 8590 8591 prog->autoload = autoload; 8592 return 0; 8593 } 8594 8595 bool bpf_program__autoattach(const struct bpf_program *prog) 8596 { 8597 return prog->autoattach; 8598 } 8599 8600 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach) 8601 { 8602 prog->autoattach = autoattach; 8603 } 8604 8605 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) 8606 { 8607 return prog->insns; 8608 } 8609 8610 size_t bpf_program__insn_cnt(const struct bpf_program *prog) 8611 { 8612 return prog->insns_cnt; 8613 } 8614 8615 int bpf_program__set_insns(struct bpf_program *prog, 8616 struct bpf_insn *new_insns, size_t new_insn_cnt) 8617 { 8618 struct bpf_insn *insns; 8619 8620 if (prog->obj->loaded) 8621 return -EBUSY; 8622 8623 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns)); 8624 /* NULL is a valid return from reallocarray if the new count is zero */ 8625 if (!insns && new_insn_cnt) { 8626 pr_warn("prog '%s': failed to realloc prog code\n", prog->name); 8627 return -ENOMEM; 8628 } 8629 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns)); 8630 8631 prog->insns = insns; 8632 prog->insns_cnt = new_insn_cnt; 8633 return 0; 8634 } 8635 8636 int bpf_program__fd(const struct bpf_program *prog) 8637 { 8638 if (!prog) 8639 return libbpf_err(-EINVAL); 8640 8641 if (prog->fd < 0) 8642 return libbpf_err(-ENOENT); 8643 8644 return prog->fd; 8645 } 8646 8647 __alias(bpf_program__type) 8648 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); 8649 8650 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) 8651 { 8652 return prog->type; 8653 } 8654 8655 static size_t custom_sec_def_cnt; 8656 static struct bpf_sec_def *custom_sec_defs; 8657 static struct bpf_sec_def custom_fallback_def; 8658 static bool has_custom_fallback_def; 8659 static int last_custom_sec_def_handler_id; 8660 8661 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) 8662 { 8663 if (prog->obj->loaded) 8664 return libbpf_err(-EBUSY); 8665 8666 /* if type is not changed, do nothing */ 8667 if (prog->type == type) 8668 return 0; 8669 8670 prog->type = type; 8671 8672 /* If a program type was changed, we 
need to reset associated SEC() 8673 * handler, as it will be invalid now. The only exception is a generic 8674 * fallback handler, which by definition is program type-agnostic and 8675 * is a catch-all custom handler, optionally set by the application, 8676 * so should be able to handle any type of BPF program. 8677 */ 8678 if (prog->sec_def != &custom_fallback_def) 8679 prog->sec_def = NULL; 8680 return 0; 8681 } 8682 8683 __alias(bpf_program__expected_attach_type) 8684 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); 8685 8686 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) 8687 { 8688 return prog->expected_attach_type; 8689 } 8690 8691 int bpf_program__set_expected_attach_type(struct bpf_program *prog, 8692 enum bpf_attach_type type) 8693 { 8694 if (prog->obj->loaded) 8695 return libbpf_err(-EBUSY); 8696 8697 prog->expected_attach_type = type; 8698 return 0; 8699 } 8700 8701 __u32 bpf_program__flags(const struct bpf_program *prog) 8702 { 8703 return prog->prog_flags; 8704 } 8705 8706 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) 8707 { 8708 if (prog->obj->loaded) 8709 return libbpf_err(-EBUSY); 8710 8711 prog->prog_flags = flags; 8712 return 0; 8713 } 8714 8715 __u32 bpf_program__log_level(const struct bpf_program *prog) 8716 { 8717 return prog->log_level; 8718 } 8719 8720 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) 8721 { 8722 if (prog->obj->loaded) 8723 return libbpf_err(-EBUSY); 8724 8725 prog->log_level = log_level; 8726 return 0; 8727 } 8728 8729 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) 8730 { 8731 *log_size = prog->log_size; 8732 return prog->log_buf; 8733 } 8734 8735 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) 8736 { 8737 if (log_size && !log_buf) 8738 return -EINVAL; 8739 if (log_size > UINT_MAX) 8740 return -EINVAL; 8741 if (prog->obj->loaded) 8742 return -EBUSY; 8743 8744 prog->log_buf = log_buf; 8745 prog->log_size = log_size; 8746 return 0; 8747 } 8748 8749 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...)
{ \ 8750 .sec = (char *)sec_pfx, \ 8751 .prog_type = BPF_PROG_TYPE_##ptype, \ 8752 .expected_attach_type = atype, \ 8753 .cookie = (long)(flags), \ 8754 .prog_prepare_load_fn = libbpf_prepare_prog_load, \ 8755 __VA_ARGS__ \ 8756 } 8757 8758 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8759 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8760 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8761 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8762 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8763 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8764 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8765 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8766 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8767 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8768 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); 8769 8770 static const struct bpf_sec_def section_defs[] = { 8771 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE), 8772 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE), 8773 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE), 8774 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), 8775 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), 8776 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), 8777 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe), 8778 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), 8779 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), 8780 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), 8781 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), 8782 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), 8783 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), 8784 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), 8785 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), 8786 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), 8787 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), 8788 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), 8789 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt), 8790 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */ 8791 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */ 8792 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), 8793 SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), 8794 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ 8795 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ 8796 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */ 8797 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp), 8798 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp), 
8799 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), 8800 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), 8801 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), 8802 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), 8803 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), 8804 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), 8805 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), 8806 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), 8807 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 8808 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 8809 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), 8810 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace), 8811 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), 8812 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), 8813 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF), 8814 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), 8815 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), 8816 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), 8817 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), 8818 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), 8819 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), 8820 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), 8821 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), 8822 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT), 8823 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE), 8824 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE), 8825 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE), 8826 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE), 8827 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE), 8828 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT), 8829 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT), 8830 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT), 8831 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE), 8832 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT), 8833 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT), 8834 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT), 8835 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT), 8836 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT), 8837 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE), 8838 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE), 8839 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE), 8840 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT), 8841 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE), 8842 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE), 8843 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE), 8844 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE), 8845 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, 
BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE), 8846 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE), 8847 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE), 8848 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE), 8849 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE), 8850 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE), 8851 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE), 8852 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE), 8853 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE), 8854 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE), 8855 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE), 8856 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE), 8857 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE), 8858 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT), 8859 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), 8860 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE), 8861 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE), 8862 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE), 8863 }; 8864 8865 int libbpf_register_prog_handler(const char *sec, 8866 enum bpf_prog_type prog_type, 8867 enum bpf_attach_type exp_attach_type, 8868 const struct libbpf_prog_handler_opts *opts) 8869 { 8870 struct bpf_sec_def *sec_def; 8871 8872 if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) 8873 return libbpf_err(-EINVAL); 8874 8875 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ 8876 return libbpf_err(-E2BIG); 8877 8878 if (sec) { 8879 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, 8880 sizeof(*sec_def)); 8881 if (!sec_def) 8882 return libbpf_err(-ENOMEM); 8883 8884 custom_sec_defs = sec_def; 8885 sec_def = &custom_sec_defs[custom_sec_def_cnt]; 8886 } else { 8887 if (has_custom_fallback_def) 8888 return libbpf_err(-EBUSY); 8889 8890 sec_def = &custom_fallback_def; 8891 } 8892 8893 sec_def->sec = sec ? 
strdup(sec) : NULL; 8894 if (sec && !sec_def->sec) 8895 return libbpf_err(-ENOMEM); 8896 8897 sec_def->prog_type = prog_type; 8898 sec_def->expected_attach_type = exp_attach_type; 8899 sec_def->cookie = OPTS_GET(opts, cookie, 0); 8900 8901 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); 8902 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); 8903 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); 8904 8905 sec_def->handler_id = ++last_custom_sec_def_handler_id; 8906 8907 if (sec) 8908 custom_sec_def_cnt++; 8909 else 8910 has_custom_fallback_def = true; 8911 8912 return sec_def->handler_id; 8913 } 8914 8915 int libbpf_unregister_prog_handler(int handler_id) 8916 { 8917 struct bpf_sec_def *sec_defs; 8918 int i; 8919 8920 if (handler_id <= 0) 8921 return libbpf_err(-EINVAL); 8922 8923 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { 8924 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); 8925 has_custom_fallback_def = false; 8926 return 0; 8927 } 8928 8929 for (i = 0; i < custom_sec_def_cnt; i++) { 8930 if (custom_sec_defs[i].handler_id == handler_id) 8931 break; 8932 } 8933 8934 if (i == custom_sec_def_cnt) 8935 return libbpf_err(-ENOENT); 8936 8937 free(custom_sec_defs[i].sec); 8938 for (i = i + 1; i < custom_sec_def_cnt; i++) 8939 custom_sec_defs[i - 1] = custom_sec_defs[i]; 8940 custom_sec_def_cnt--; 8941 8942 /* try to shrink the array, but it's ok if we couldn't */ 8943 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); 8944 /* if new count is zero, reallocarray can return a valid NULL result; 8945 * in this case the previous pointer will be freed, so we *have to* 8946 * reassign old pointer to the new value (even if it's NULL) 8947 */ 8948 if (sec_defs || custom_sec_def_cnt == 0) 8949 custom_sec_defs = sec_defs; 8950 8951 return 0; 8952 } 8953 8954 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name) 8955 { 8956 size_t len = strlen(sec_def->sec); 8957 8958 /* "type/" always has to have proper SEC("type/extras") form */ 8959 if (sec_def->sec[len - 1] == '/') { 8960 if (str_has_pfx(sec_name, sec_def->sec)) 8961 return true; 8962 return false; 8963 } 8964 8965 /* "type+" means it can be either exact SEC("type") or 8966 * well-formed SEC("type/extras") with proper '/' separator 8967 */ 8968 if (sec_def->sec[len - 1] == '+') { 8969 len--; 8970 /* not even a prefix */ 8971 if (strncmp(sec_name, sec_def->sec, len) != 0) 8972 return false; 8973 /* exact match or has '/' separator */ 8974 if (sec_name[len] == '\0' || sec_name[len] == '/') 8975 return true; 8976 return false; 8977 } 8978 8979 return strcmp(sec_name, sec_def->sec) == 0; 8980 } 8981 8982 static const struct bpf_sec_def *find_sec_def(const char *sec_name) 8983 { 8984 const struct bpf_sec_def *sec_def; 8985 int i, n; 8986 8987 n = custom_sec_def_cnt; 8988 for (i = 0; i < n; i++) { 8989 sec_def = &custom_sec_defs[i]; 8990 if (sec_def_matches(sec_def, sec_name)) 8991 return sec_def; 8992 } 8993 8994 n = ARRAY_SIZE(section_defs); 8995 for (i = 0; i < n; i++) { 8996 sec_def = &section_defs[i]; 8997 if (sec_def_matches(sec_def, sec_name)) 8998 return sec_def; 8999 } 9000 9001 if (has_custom_fallback_def) 9002 return &custom_fallback_def; 9003 9004 return NULL; 9005 } 9006 9007 #define MAX_TYPE_NAME_SIZE 32 9008 9009 static char *libbpf_get_type_names(bool attach_type) 9010 { 9011 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; 9012 char *buf; 9013 9014 buf = malloc(len);
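/* len budgets MAX_TYPE_NAME_SIZE bytes per section definition; the
 * append loop below still re-checks the remaining space before every
 * concatenation and bails out if a name would not fit.
 */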
9015 if (!buf) 9016 return NULL; 9017 9018 buf[0] = '\0'; 9019 /* Forge string buf with all available names */ 9020 for (i = 0; i < ARRAY_SIZE(section_defs); i++) { 9021 const struct bpf_sec_def *sec_def = &section_defs[i]; 9022 9023 if (attach_type) { 9024 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) 9025 continue; 9026 9027 if (!(sec_def->cookie & SEC_ATTACHABLE)) 9028 continue; 9029 } 9030 9031 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { 9032 free(buf); 9033 return NULL; 9034 } 9035 strcat(buf, " "); 9036 strcat(buf, section_defs[i].sec); 9037 } 9038 9039 return buf; 9040 } 9041 9042 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, 9043 enum bpf_attach_type *expected_attach_type) 9044 { 9045 const struct bpf_sec_def *sec_def; 9046 char *type_names; 9047 9048 if (!name) 9049 return libbpf_err(-EINVAL); 9050 9051 sec_def = find_sec_def(name); 9052 if (sec_def) { 9053 *prog_type = sec_def->prog_type; 9054 *expected_attach_type = sec_def->expected_attach_type; 9055 return 0; 9056 } 9057 9058 pr_debug("failed to guess program type from ELF section '%s'\n", name); 9059 type_names = libbpf_get_type_names(false); 9060 if (type_names != NULL) { 9061 pr_debug("supported section(type) names are:%s\n", type_names); 9062 free(type_names); 9063 } 9064 9065 return libbpf_err(-ESRCH); 9066 } 9067 9068 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t) 9069 { 9070 if (t < 0 || t >= ARRAY_SIZE(attach_type_name)) 9071 return NULL; 9072 9073 return attach_type_name[t]; 9074 } 9075 9076 const char *libbpf_bpf_link_type_str(enum bpf_link_type t) 9077 { 9078 if (t < 0 || t >= ARRAY_SIZE(link_type_name)) 9079 return NULL; 9080 9081 return link_type_name[t]; 9082 } 9083 9084 const char *libbpf_bpf_map_type_str(enum bpf_map_type t) 9085 { 9086 if (t < 0 || t >= ARRAY_SIZE(map_type_name)) 9087 return NULL; 9088 9089 return map_type_name[t]; 9090 } 9091 9092 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t) 9093 { 9094 if (t < 0 || t >= ARRAY_SIZE(prog_type_name)) 9095 return NULL; 9096 9097 return prog_type_name[t]; 9098 } 9099 9100 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, 9101 int sec_idx, 9102 size_t offset) 9103 { 9104 struct bpf_map *map; 9105 size_t i; 9106 9107 for (i = 0; i < obj->nr_maps; i++) { 9108 map = &obj->maps[i]; 9109 if (!bpf_map__is_struct_ops(map)) 9110 continue; 9111 if (map->sec_idx == sec_idx && 9112 map->sec_offset <= offset && 9113 offset - map->sec_offset < map->def.value_size) 9114 return map; 9115 } 9116 9117 return NULL; 9118 } 9119 9120 /* Collect the reloc from ELF and populate the st_ops->progs[] */ 9121 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, 9122 Elf64_Shdr *shdr, Elf_Data *data) 9123 { 9124 const struct btf_member *member; 9125 struct bpf_struct_ops *st_ops; 9126 struct bpf_program *prog; 9127 unsigned int shdr_idx; 9128 const struct btf *btf; 9129 struct bpf_map *map; 9130 unsigned int moff, insn_idx; 9131 const char *name; 9132 __u32 member_idx; 9133 Elf64_Sym *sym; 9134 Elf64_Rel *rel; 9135 int i, nrels; 9136 9137 btf = obj->btf; 9138 nrels = shdr->sh_size / shdr->sh_entsize; 9139 for (i = 0; i < nrels; i++) { 9140 rel = elf_rel_by_idx(data, i); 9141 if (!rel) { 9142 pr_warn("struct_ops reloc: failed to get %d reloc\n", i); 9143 return -LIBBPF_ERRNO__FORMAT; 9144 } 9145 9146 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); 9147 if (!sym) { 9148 pr_warn("struct_ops reloc: symbol %zx not found\n", 9149
(size_t)ELF64_R_SYM(rel->r_info)); 9150 return -LIBBPF_ERRNO__FORMAT; 9151 } 9152 9153 name = elf_sym_str(obj, sym->st_name) ?: "<?>"; 9154 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset); 9155 if (!map) { 9156 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", 9157 (size_t)rel->r_offset); 9158 return -EINVAL; 9159 } 9160 9161 moff = rel->r_offset - map->sec_offset; 9162 shdr_idx = sym->st_shndx; 9163 st_ops = map->st_ops; 9164 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", 9165 map->name, 9166 (long long)(rel->r_info >> 32), 9167 (long long)sym->st_value, 9168 shdr_idx, (size_t)rel->r_offset, 9169 map->sec_offset, sym->st_name, name); 9170 9171 if (shdr_idx >= SHN_LORESERVE) { 9172 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", 9173 map->name, (size_t)rel->r_offset, shdr_idx); 9174 return -LIBBPF_ERRNO__RELOC; 9175 } 9176 if (sym->st_value % BPF_INSN_SZ) { 9177 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", 9178 map->name, (unsigned long long)sym->st_value); 9179 return -LIBBPF_ERRNO__FORMAT; 9180 } 9181 insn_idx = sym->st_value / BPF_INSN_SZ; 9182 9183 member = find_member_by_offset(st_ops->type, moff * 8); 9184 if (!member) { 9185 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", 9186 map->name, moff); 9187 return -EINVAL; 9188 } 9189 member_idx = member - btf_members(st_ops->type); 9190 name = btf__name_by_offset(btf, member->name_off); 9191 9192 if (!resolve_func_ptr(btf, member->type, NULL)) { 9193 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", 9194 map->name, name); 9195 return -EINVAL; 9196 } 9197 9198 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); 9199 if (!prog) { 9200 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", 9201 map->name, shdr_idx, name); 9202 return -EINVAL; 9203 } 9204 9205 /* prevent the use of BPF prog with invalid type */ 9206 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { 9207 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", 9208 map->name, prog->name); 9209 return -EINVAL; 9210 } 9211 9212 /* if we haven't yet processed this BPF program, record proper 9213 * attach_btf_id and member_idx 9214 */ 9215 if (!prog->attach_btf_id) { 9216 prog->attach_btf_id = st_ops->type_id; 9217 prog->expected_attach_type = member_idx; 9218 } 9219 9220 /* struct_ops BPF prog can be re-used between multiple 9221 * .struct_ops & .struct_ops.link as long as it's the 9222 * same struct_ops struct definition and the same 9223 * function pointer field 9224 */ 9225 if (prog->attach_btf_id != st_ops->type_id || 9226 prog->expected_attach_type != member_idx) { 9227 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", 9228 map->name, prog->name, prog->sec_name, prog->type, 9229 prog->attach_btf_id, prog->expected_attach_type, name); 9230 return -EINVAL; 9231 } 9232 9233 st_ops->progs[member_idx] = prog; 9234 } 9235 9236 return 0; 9237 } 9238 9239 #define BTF_TRACE_PREFIX "btf_trace_" 9240 #define BTF_LSM_PREFIX "bpf_lsm_" 9241 #define BTF_ITER_PREFIX "bpf_iter_" 9242 #define BTF_MAX_NAME_SIZE 128 9243 9244 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, 9245 const char **prefix, int *kind) 9246 { 9247 switch (attach_type) { 9248 case BPF_TRACE_RAW_TP: 9249 *prefix = BTF_TRACE_PREFIX; 9250 *kind = 
BTF_KIND_TYPEDEF; 9251 break; 9252 case BPF_LSM_MAC: 9253 case BPF_LSM_CGROUP: 9254 *prefix = BTF_LSM_PREFIX; 9255 *kind = BTF_KIND_FUNC; 9256 break; 9257 case BPF_TRACE_ITER: 9258 *prefix = BTF_ITER_PREFIX; 9259 *kind = BTF_KIND_FUNC; 9260 break; 9261 default: 9262 *prefix = ""; 9263 *kind = BTF_KIND_FUNC; 9264 } 9265 } 9266 9267 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, 9268 const char *name, __u32 kind) 9269 { 9270 char btf_type_name[BTF_MAX_NAME_SIZE]; 9271 int ret; 9272 9273 ret = snprintf(btf_type_name, sizeof(btf_type_name), 9274 "%s%s", prefix, name); 9275 /* snprintf returns the number of characters written excluding the 9276 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it 9277 * indicates truncation. 9278 */ 9279 if (ret < 0 || ret >= sizeof(btf_type_name)) 9280 return -ENAMETOOLONG; 9281 return btf__find_by_name_kind(btf, btf_type_name, kind); 9282 } 9283 9284 static inline int find_attach_btf_id(struct btf *btf, const char *name, 9285 enum bpf_attach_type attach_type) 9286 { 9287 const char *prefix; 9288 int kind; 9289 9290 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); 9291 return find_btf_by_prefix_kind(btf, prefix, name, kind); 9292 } 9293 9294 int libbpf_find_vmlinux_btf_id(const char *name, 9295 enum bpf_attach_type attach_type) 9296 { 9297 struct btf *btf; 9298 int err; 9299 9300 btf = btf__load_vmlinux_btf(); 9301 err = libbpf_get_error(btf); 9302 if (err) { 9303 pr_warn("vmlinux BTF is not found\n"); 9304 return libbpf_err(err); 9305 } 9306 9307 err = find_attach_btf_id(btf, name, attach_type); 9308 if (err <= 0) 9309 pr_warn("%s is not found in vmlinux BTF\n", name); 9310 9311 btf__free(btf); 9312 return libbpf_err(err); 9313 } 9314 9315 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) 9316 { 9317 struct bpf_prog_info info; 9318 __u32 info_len = sizeof(info); 9319 struct btf *btf; 9320 int err; 9321 9322 memset(&info, 0, info_len); 9323 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); 9324 if (err) { 9325 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n", 9326 attach_prog_fd, err); 9327 return err; 9328 } 9329 9330 err = -EINVAL; 9331 if (!info.btf_id) { 9332 pr_warn("The target program doesn't have BTF\n"); 9333 goto out; 9334 } 9335 btf = btf__load_from_kernel_by_id(info.btf_id); 9336 err = libbpf_get_error(btf); 9337 if (err) { 9338 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err); 9339 goto out; 9340 } 9341 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); 9342 btf__free(btf); 9343 if (err <= 0) { 9344 pr_warn("%s is not found in prog's BTF\n", name); 9345 goto out; 9346 } 9347 out: 9348 return err; 9349 } 9350 9351 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, 9352 enum bpf_attach_type attach_type, 9353 int *btf_obj_fd, int *btf_type_id) 9354 { 9355 int ret, i; 9356 9357 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type); 9358 if (ret > 0) { 9359 *btf_obj_fd = 0; /* vmlinux BTF */ 9360 *btf_type_id = ret; 9361 return 0; 9362 } 9363 if (ret != -ENOENT) 9364 return ret; 9365 9366 ret = load_module_btfs(obj); 9367 if (ret) 9368 return ret; 9369 9370 for (i = 0; i < obj->btf_module_cnt; i++) { 9371 const struct module_btf *mod = &obj->btf_modules[i]; 9372 9373 ret = find_attach_btf_id(mod->btf, attach_name, attach_type); 9374 if (ret > 0) { 9375 *btf_obj_fd = mod->fd; 9376 *btf_type_id = ret; 9377 return 0; 9378 } 9379 if (ret == -ENOENT) 9380 continue; 9381 9382 return ret; 9383 } 
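/* The attach target was found neither in vmlinux BTF nor in any of the
 * successfully loaded kernel module BTFs.
 */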
9384 9385 return -ESRCH; 9386 } 9387 9388 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, 9389 int *btf_obj_fd, int *btf_type_id) 9390 { 9391 enum bpf_attach_type attach_type = prog->expected_attach_type; 9392 __u32 attach_prog_fd = prog->attach_prog_fd; 9393 int err = 0; 9394 9395 /* BPF program's BTF ID */ 9396 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) { 9397 if (!attach_prog_fd) { 9398 pr_warn("prog '%s': attach program FD is not set\n", prog->name); 9399 return -EINVAL; 9400 } 9401 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd); 9402 if (err < 0) { 9403 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n", 9404 prog->name, attach_prog_fd, attach_name, err); 9405 return err; 9406 } 9407 *btf_obj_fd = 0; 9408 *btf_type_id = err; 9409 return 0; 9410 } 9411 9412 /* kernel/module BTF ID */ 9413 if (prog->obj->gen_loader) { 9414 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type); 9415 *btf_obj_fd = 0; 9416 *btf_type_id = 1; 9417 } else { 9418 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id); 9419 } 9420 if (err) { 9421 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n", 9422 prog->name, attach_name, err); 9423 return err; 9424 } 9425 return 0; 9426 } 9427 9428 int libbpf_attach_type_by_name(const char *name, 9429 enum bpf_attach_type *attach_type) 9430 { 9431 char *type_names; 9432 const struct bpf_sec_def *sec_def; 9433 9434 if (!name) 9435 return libbpf_err(-EINVAL); 9436 9437 sec_def = find_sec_def(name); 9438 if (!sec_def) { 9439 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); 9440 type_names = libbpf_get_type_names(true); 9441 if (type_names != NULL) { 9442 pr_debug("attachable section(type) names are:%s\n", type_names); 9443 free(type_names); 9444 } 9445 9446 return libbpf_err(-EINVAL); 9447 } 9448 9449 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) 9450 return libbpf_err(-EINVAL); 9451 if (!(sec_def->cookie & SEC_ATTACHABLE)) 9452 return libbpf_err(-EINVAL); 9453 9454 *attach_type = sec_def->expected_attach_type; 9455 return 0; 9456 } 9457 9458 int bpf_map__fd(const struct bpf_map *map) 9459 { 9460 return map ? map->fd : libbpf_err(-EINVAL); 9461 } 9462 9463 static bool map_uses_real_name(const struct bpf_map *map) 9464 { 9465 /* Since libbpf started to support custom .data.* and .rodata.* maps, 9466 * their user-visible name differs from kernel-visible name. Users see 9467 * such map's corresponding ELF section name as a map name. 9468 * This check distinguishes .data/.rodata from .data.* and .rodata.* 9469 * maps to know which name has to be returned to the user. 
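 *
 * E.g. (an illustrative sketch), a global variable defined in a BPF
 * program as
 *
 *	int counters[64] SEC(".data.counters");
 *
 * gets its own map whose user-visible name is ".data.counters", while
 * ordinary globals all land in the canonical ".data" map.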
9470 */ 9471 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) 9472 return true; 9473 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) 9474 return true; 9475 return false; 9476 } 9477 9478 const char *bpf_map__name(const struct bpf_map *map) 9479 { 9480 if (!map) 9481 return NULL; 9482 9483 if (map_uses_real_name(map)) 9484 return map->real_name; 9485 9486 return map->name; 9487 } 9488 9489 enum bpf_map_type bpf_map__type(const struct bpf_map *map) 9490 { 9491 return map->def.type; 9492 } 9493 9494 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) 9495 { 9496 if (map->fd >= 0) 9497 return libbpf_err(-EBUSY); 9498 map->def.type = type; 9499 return 0; 9500 } 9501 9502 __u32 bpf_map__map_flags(const struct bpf_map *map) 9503 { 9504 return map->def.map_flags; 9505 } 9506 9507 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) 9508 { 9509 if (map->fd >= 0) 9510 return libbpf_err(-EBUSY); 9511 map->def.map_flags = flags; 9512 return 0; 9513 } 9514 9515 __u64 bpf_map__map_extra(const struct bpf_map *map) 9516 { 9517 return map->map_extra; 9518 } 9519 9520 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) 9521 { 9522 if (map->fd >= 0) 9523 return libbpf_err(-EBUSY); 9524 map->map_extra = map_extra; 9525 return 0; 9526 } 9527 9528 __u32 bpf_map__numa_node(const struct bpf_map *map) 9529 { 9530 return map->numa_node; 9531 } 9532 9533 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) 9534 { 9535 if (map->fd >= 0) 9536 return libbpf_err(-EBUSY); 9537 map->numa_node = numa_node; 9538 return 0; 9539 } 9540 9541 __u32 bpf_map__key_size(const struct bpf_map *map) 9542 { 9543 return map->def.key_size; 9544 } 9545 9546 int bpf_map__set_key_size(struct bpf_map *map, __u32 size) 9547 { 9548 if (map->fd >= 0) 9549 return libbpf_err(-EBUSY); 9550 map->def.key_size = size; 9551 return 0; 9552 } 9553 9554 __u32 bpf_map__value_size(const struct bpf_map *map) 9555 { 9556 return map->def.value_size; 9557 } 9558 9559 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size) 9560 { 9561 struct btf *btf; 9562 struct btf_type *datasec_type, *var_type; 9563 struct btf_var_secinfo *var; 9564 const struct btf_type *array_type; 9565 const struct btf_array *array; 9566 int vlen, element_sz, new_array_id; 9567 __u32 nr_elements; 9568 9569 /* check btf existence */ 9570 btf = bpf_object__btf(map->obj); 9571 if (!btf) 9572 return -ENOENT; 9573 9574 /* verify map is datasec */ 9575 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map)); 9576 if (!btf_is_datasec(datasec_type)) { 9577 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n", 9578 bpf_map__name(map)); 9579 return -EINVAL; 9580 } 9581 9582 /* verify datasec has at least one var */ 9583 vlen = btf_vlen(datasec_type); 9584 if (vlen == 0) { 9585 pr_warn("map '%s': cannot be resized, map value datasec is empty\n", 9586 bpf_map__name(map)); 9587 return -EINVAL; 9588 } 9589 9590 /* verify last var in the datasec is an array */ 9591 var = &btf_var_secinfos(datasec_type)[vlen - 1]; 9592 var_type = btf_type_by_id(btf, var->type); 9593 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL); 9594 if (!btf_is_array(array_type)) { 9595 pr_warn("map '%s': cannot be resized, last var must be an array\n", 9596 bpf_map__name(map)); 9597 return -EINVAL; 9598 } 9599 9600 /* verify request size aligns with array */ 9601 array = btf_array(array_type); 9602 element_sz = btf__resolve_size(btf, array->type); 9603 if 
(element_sz <= 0 || (size - var->offset) % element_sz != 0) { 9604 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n", 9605 bpf_map__name(map), element_sz, size); 9606 return -EINVAL; 9607 } 9608 9609 /* create a new array based on the existing array, but with new length */ 9610 nr_elements = (size - var->offset) / element_sz; 9611 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements); 9612 if (new_array_id < 0) 9613 return new_array_id; 9614 9615 /* adding a new btf type invalidates existing pointers to btf objects, 9616 * so refresh pointers before proceeding 9617 */ 9618 datasec_type = btf_type_by_id(btf, map->btf_value_type_id); 9619 var = &btf_var_secinfos(datasec_type)[vlen - 1]; 9620 var_type = btf_type_by_id(btf, var->type); 9621 9622 /* finally update btf info */ 9623 datasec_type->size = size; 9624 var->size = size - var->offset; 9625 var_type->type = new_array_id; 9626 9627 return 0; 9628 } 9629 9630 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) 9631 { 9632 if (map->fd >= 0) 9633 return libbpf_err(-EBUSY); 9634 9635 if (map->mmaped) { 9636 int err; 9637 size_t mmap_old_sz, mmap_new_sz; 9638 9639 mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries); 9640 mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries); 9641 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); 9642 if (err) { 9643 pr_warn("map '%s': failed to resize memory-mapped region: %d\n", 9644 bpf_map__name(map), err); 9645 return err; 9646 } 9647 err = map_btf_datasec_resize(map, size); 9648 if (err && err != -ENOENT) { 9649 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n", 9650 bpf_map__name(map), err); 9651 map->btf_value_type_id = 0; 9652 map->btf_key_type_id = 0; 9653 } 9654 } 9655 9656 map->def.value_size = size; 9657 return 0; 9658 } 9659
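/*
 * Illustrative usage sketch (editor's example, not part of libbpf): growing
 * a global data map before load. Object and map names here are hypothetical;
 * bpf_map__set_value_size() must be called before bpf_object__load(), and,
 * per map_btf_datasec_resize() above, the datasec's last variable must be an
 * array for the BTF info to be adjusted along with the map.
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".data.samples");
 *	int err;
 *
 *	if (map)
 *		err = bpf_map__set_value_size(map, bpf_map__value_size(map) + 4096);
 */
9660 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) 9661 { 9662 return map ? map->btf_key_type_id : 0; 9663 } 9664 9665 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) 9666 { 9667 return map ?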
map->btf_value_type_id : 0; 9668 } 9669 9670 int bpf_map__set_initial_value(struct bpf_map *map, 9671 const void *data, size_t size) 9672 { 9673 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || 9674 size != map->def.value_size || map->fd >= 0) 9675 return libbpf_err(-EINVAL); 9676 9677 memcpy(map->mmaped, data, size); 9678 return 0; 9679 } 9680 9681 void *bpf_map__initial_value(struct bpf_map *map, size_t *psize) 9682 { 9683 if (!map->mmaped) 9684 return NULL; 9685 *psize = map->def.value_size; 9686 return map->mmaped; 9687 } 9688 9689 bool bpf_map__is_internal(const struct bpf_map *map) 9690 { 9691 return map->libbpf_type != LIBBPF_MAP_UNSPEC; 9692 } 9693 9694 __u32 bpf_map__ifindex(const struct bpf_map *map) 9695 { 9696 return map->map_ifindex; 9697 } 9698 9699 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) 9700 { 9701 if (map->fd >= 0) 9702 return libbpf_err(-EBUSY); 9703 map->map_ifindex = ifindex; 9704 return 0; 9705 } 9706 9707 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) 9708 { 9709 if (!bpf_map_type__is_map_in_map(map->def.type)) { 9710 pr_warn("error: unsupported map type\n"); 9711 return libbpf_err(-EINVAL); 9712 } 9713 if (map->inner_map_fd != -1) { 9714 pr_warn("error: inner_map_fd already specified\n"); 9715 return libbpf_err(-EINVAL); 9716 } 9717 if (map->inner_map) { 9718 bpf_map__destroy(map->inner_map); 9719 zfree(&map->inner_map); 9720 } 9721 map->inner_map_fd = fd; 9722 return 0; 9723 } 9724 9725 static struct bpf_map * 9726 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) 9727 { 9728 ssize_t idx; 9729 struct bpf_map *s, *e; 9730 9731 if (!obj || !obj->maps) 9732 return errno = EINVAL, NULL; 9733 9734 s = obj->maps; 9735 e = obj->maps + obj->nr_maps; 9736 9737 if ((m < s) || (m >= e)) { 9738 pr_warn("error in %s: map handler doesn't belong to object\n", 9739 __func__); 9740 return errno = EINVAL, NULL; 9741 } 9742 9743 idx = (m - obj->maps) + i; 9744 if (idx >= obj->nr_maps || idx < 0) 9745 return NULL; 9746 return &obj->maps[idx]; 9747 } 9748 9749 struct bpf_map * 9750 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) 9751 { 9752 if (prev == NULL) 9753 return obj->maps; 9754 9755 return __bpf_map__iter(prev, obj, 1); 9756 } 9757 9758 struct bpf_map * 9759 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) 9760 { 9761 if (next == NULL) { 9762 if (!obj->nr_maps) 9763 return NULL; 9764 return obj->maps + obj->nr_maps - 1; 9765 } 9766 9767 return __bpf_map__iter(next, obj, -1); 9768 } 9769 9770 struct bpf_map * 9771 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) 9772 { 9773 struct bpf_map *pos; 9774 9775 bpf_object__for_each_map(pos, obj) { 9776 /* if it's a special internal map name (which always starts 9777 * with dot) then check if that special name matches the 9778 * real map name (ELF section name) 9779 */ 9780 if (name[0] == '.') { 9781 if (pos->real_name && strcmp(pos->real_name, name) == 0) 9782 return pos; 9783 continue; 9784 } 9785 /* otherwise map name has to be an exact match */ 9786 if (map_uses_real_name(pos)) { 9787 if (strcmp(pos->real_name, name) == 0) 9788 return pos; 9789 continue; 9790 } 9791 if (strcmp(pos->name, name) == 0) 9792 return pos; 9793 } 9794 return errno = ENOENT, NULL; 9795 } 9796 9797 int 9798 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) 9799 { 9800 return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); 9801 } 9802 9803 static int 
validate_map_op(const struct bpf_map *map, size_t key_sz, 9804 size_t value_sz, bool check_value_sz) 9805 { 9806 if (map->fd <= 0) 9807 return -ENOENT; 9808 9809 if (map->def.key_size != key_sz) { 9810 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n", 9811 map->name, key_sz, map->def.key_size); 9812 return -EINVAL; 9813 } 9814 9815 if (!check_value_sz) 9816 return 0; 9817 9818 switch (map->def.type) { 9819 case BPF_MAP_TYPE_PERCPU_ARRAY: 9820 case BPF_MAP_TYPE_PERCPU_HASH: 9821 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 9822 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: { 9823 int num_cpu = libbpf_num_possible_cpus(); 9824 size_t elem_sz = roundup(map->def.value_size, 8); 9825 9826 if (value_sz != num_cpu * elem_sz) { 9827 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n", 9828 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz); 9829 return -EINVAL; 9830 } 9831 break; 9832 } 9833 default: 9834 if (map->def.value_size != value_sz) { 9835 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n", 9836 map->name, value_sz, map->def.value_size); 9837 return -EINVAL; 9838 } 9839 break; 9840 } 9841 return 0; 9842 } 9843 9844 int bpf_map__lookup_elem(const struct bpf_map *map, 9845 const void *key, size_t key_sz, 9846 void *value, size_t value_sz, __u64 flags) 9847 { 9848 int err; 9849 9850 err = validate_map_op(map, key_sz, value_sz, true); 9851 if (err) 9852 return libbpf_err(err); 9853 9854 return bpf_map_lookup_elem_flags(map->fd, key, value, flags); 9855 } 9856 9857 int bpf_map__update_elem(const struct bpf_map *map, 9858 const void *key, size_t key_sz, 9859 const void *value, size_t value_sz, __u64 flags) 9860 { 9861 int err; 9862 9863 err = validate_map_op(map, key_sz, value_sz, true); 9864 if (err) 9865 return libbpf_err(err); 9866 9867 return bpf_map_update_elem(map->fd, key, value, flags); 9868 } 9869 9870 int bpf_map__delete_elem(const struct bpf_map *map, 9871 const void *key, size_t key_sz, __u64 flags) 9872 { 9873 int err; 9874 9875 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); 9876 if (err) 9877 return libbpf_err(err); 9878 9879 return bpf_map_delete_elem_flags(map->fd, key, flags); 9880 } 9881 9882 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, 9883 const void *key, size_t key_sz, 9884 void *value, size_t value_sz, __u64 flags) 9885 { 9886 int err; 9887 9888 err = validate_map_op(map, key_sz, value_sz, true); 9889 if (err) 9890 return libbpf_err(err); 9891 9892 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags); 9893 } 9894 9895 int bpf_map__get_next_key(const struct bpf_map *map, 9896 const void *cur_key, void *next_key, size_t key_sz) 9897 { 9898 int err; 9899 9900 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); 9901 if (err) 9902 return libbpf_err(err); 9903 9904 return bpf_map_get_next_key(map->fd, cur_key, next_key); 9905 } 9906
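/*
 * Illustrative usage sketch (editor's example, not part of libbpf): for
 * per-CPU maps, validate_map_op() above expects the value buffer to hold one
 * 8-byte-aligned element per possible CPU. "map" is assumed to be a
 * BPF_MAP_TYPE_PERCPU_ARRAY from a loaded object.
 *
 *	int ncpu = libbpf_num_possible_cpus();
 *	size_t elem_sz = (bpf_map__value_size(map) + 7) / 8 * 8;
 *	void *values = calloc(ncpu, elem_sz);
 *	int key = 0, err;
 *
 *	err = bpf_map__lookup_elem(map, &key, sizeof(key), values, ncpu * elem_sz, 0);
 */
9907 long libbpf_get_error(const void *ptr) 9908 { 9909 if (!IS_ERR_OR_NULL(ptr)) 9910 return 0; 9911 9912 if (IS_ERR(ptr)) 9913 errno = -PTR_ERR(ptr); 9914 9915 /* If ptr == NULL, then errno should be already set by the failing 9916 * API, because libbpf never returns NULL on success and it now always 9917 * sets errno on error. So no extra errno handling for ptr == NULL 9918 * case.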
9919 */ 9920 return -errno; 9921 } 9922 9923 /* Replace link's underlying BPF program with the new one */ 9924 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) 9925 { 9926 int ret; 9927 9928 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); 9929 return libbpf_err_errno(ret); 9930 } 9931 9932 /* Release "ownership" of the underlying BPF resource (typically, a BPF 9933 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A 9934 * disconnected link, when destroyed through a bpf_link__destroy() call, 9935 * won't attempt to detach/unregister that BPF resource. This is useful in 9936 * situations where, say, an attached BPF program has to outlive the 9937 * userspace program that attached it. Depending on the type of BPF program, 9938 * though, additional steps (like pinning the BPF program in BPF FS) might be 9939 * necessary to ensure that userspace program exit doesn't trigger automatic 9940 * detachment and cleanup inside the kernel. 9941 */ 9942 void bpf_link__disconnect(struct bpf_link *link) 9943 { 9944 link->disconnected = true; 9945 } 9946 9947 int bpf_link__destroy(struct bpf_link *link) 9948 { 9949 int err = 0; 9950 9951 if (IS_ERR_OR_NULL(link)) 9952 return 0; 9953 9954 if (!link->disconnected && link->detach) 9955 err = link->detach(link); 9956 if (link->pin_path) 9957 free(link->pin_path); 9958 if (link->dealloc) 9959 link->dealloc(link); 9960 else 9961 free(link); 9962 9963 return libbpf_err(err); 9964 } 9965 9966 int bpf_link__fd(const struct bpf_link *link) 9967 { 9968 return link->fd; 9969 } 9970 9971 const char *bpf_link__pin_path(const struct bpf_link *link) 9972 { 9973 return link->pin_path; 9974 } 9975 9976 static int bpf_link__detach_fd(struct bpf_link *link) 9977 { 9978 return libbpf_err_errno(close(link->fd)); 9979 } 9980 9981 struct bpf_link *bpf_link__open(const char *path) 9982 { 9983 struct bpf_link *link; 9984 int fd; 9985 9986 fd = bpf_obj_get(path); 9987 if (fd < 0) { 9988 fd = -errno; 9989 pr_warn("failed to open link at %s: %d\n", path, fd); 9990 return libbpf_err_ptr(fd); 9991 } 9992 9993 link = calloc(1, sizeof(*link)); 9994 if (!link) { 9995 close(fd); 9996 return libbpf_err_ptr(-ENOMEM); 9997 } 9998 link->detach = &bpf_link__detach_fd; 9999 link->fd = fd; 10000 10001 link->pin_path = strdup(path); 10002 if (!link->pin_path) { 10003 bpf_link__destroy(link); 10004 return libbpf_err_ptr(-ENOMEM); 10005 } 10006 10007 return link; 10008 } 10009
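/*
 * Illustrative usage sketch (editor's example, not part of libbpf): a link
 * that must survive the attaching process is pinned first, then disconnected
 * so that destroying the handle frees memory without detaching. The pin path
 * and "prog" variable are hypothetical.
 *
 *	struct bpf_link *link = bpf_program__attach(prog);
 *
 *	if (link && !bpf_link__pin(link, "/sys/fs/bpf/my_link")) {
 *		bpf_link__disconnect(link);	// skip detach on destroy
 *		bpf_link__destroy(link);	// frees the handle only
 *	}
 */
10010 int bpf_link__detach(struct bpf_link *link) 10011 { 10012 return bpf_link_detach(link->fd) ?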
-errno : 0; 10013 } 10014 10015 int bpf_link__pin(struct bpf_link *link, const char *path) 10016 { 10017 int err; 10018 10019 if (link->pin_path) 10020 return libbpf_err(-EBUSY); 10021 err = make_parent_dir(path); 10022 if (err) 10023 return libbpf_err(err); 10024 err = check_path(path); 10025 if (err) 10026 return libbpf_err(err); 10027 10028 link->pin_path = strdup(path); 10029 if (!link->pin_path) 10030 return libbpf_err(-ENOMEM); 10031 10032 if (bpf_obj_pin(link->fd, link->pin_path)) { 10033 err = -errno; 10034 zfree(&link->pin_path); 10035 return libbpf_err(err); 10036 } 10037 10038 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); 10039 return 0; 10040 } 10041 10042 int bpf_link__unpin(struct bpf_link *link) 10043 { 10044 int err; 10045 10046 if (!link->pin_path) 10047 return libbpf_err(-EINVAL); 10048 10049 err = unlink(link->pin_path); 10050 if (err != 0) 10051 return -errno; 10052 10053 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); 10054 zfree(&link->pin_path); 10055 return 0; 10056 } 10057 10058 struct bpf_link_perf { 10059 struct bpf_link link; 10060 int perf_event_fd; 10061 /* legacy kprobe support: keep track of probe identifier and type */ 10062 char *legacy_probe_name; 10063 bool legacy_is_kprobe; 10064 bool legacy_is_retprobe; 10065 }; 10066 10067 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); 10068 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); 10069 10070 static int bpf_link_perf_detach(struct bpf_link *link) 10071 { 10072 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 10073 int err = 0; 10074 10075 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) 10076 err = -errno; 10077 10078 if (perf_link->perf_event_fd != link->fd) 10079 close(perf_link->perf_event_fd); 10080 close(link->fd); 10081 10082 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ 10083 if (perf_link->legacy_probe_name) { 10084 if (perf_link->legacy_is_kprobe) { 10085 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, 10086 perf_link->legacy_is_retprobe); 10087 } else { 10088 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, 10089 perf_link->legacy_is_retprobe); 10090 } 10091 } 10092 10093 return err; 10094 } 10095 10096 static void bpf_link_perf_dealloc(struct bpf_link *link) 10097 { 10098 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 10099 10100 free(perf_link->legacy_probe_name); 10101 free(perf_link); 10102 } 10103 10104 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, 10105 const struct bpf_perf_event_opts *opts) 10106 { 10107 char errmsg[STRERR_BUFSIZE]; 10108 struct bpf_link_perf *link; 10109 int prog_fd, link_fd = -1, err; 10110 bool force_ioctl_attach; 10111 10112 if (!OPTS_VALID(opts, bpf_perf_event_opts)) 10113 return libbpf_err_ptr(-EINVAL); 10114 10115 if (pfd < 0) { 10116 pr_warn("prog '%s': invalid perf event FD %d\n", 10117 prog->name, pfd); 10118 return libbpf_err_ptr(-EINVAL); 10119 } 10120 prog_fd = bpf_program__fd(prog); 10121 if (prog_fd < 0) { 10122 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n", 10123 prog->name); 10124 return libbpf_err_ptr(-EINVAL); 10125 } 10126 10127 link = calloc(1, sizeof(*link)); 10128 if (!link) 10129 return libbpf_err_ptr(-ENOMEM); 10130 link->link.detach = &bpf_link_perf_detach; 10131 link->link.dealloc = &bpf_link_perf_dealloc; 10132 link->perf_event_fd 
= pfd; 10133 10134 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false); 10135 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) { 10136 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts, 10137 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0)); 10138 10139 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); 10140 if (link_fd < 0) { 10141 err = -errno; 10142 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n", 10143 prog->name, pfd, 10144 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10145 goto err_out; 10146 } 10147 link->link.fd = link_fd; 10148 } else { 10149 if (OPTS_GET(opts, bpf_cookie, 0)) { 10150 pr_warn("prog '%s': user context value is not supported\n", prog->name); 10151 err = -EOPNOTSUPP; 10152 goto err_out; 10153 } 10154 10155 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { 10156 err = -errno; 10157 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", 10158 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10159 if (err == -EPROTO) 10160 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", 10161 prog->name, pfd); 10162 goto err_out; 10163 } 10164 link->link.fd = pfd; 10165 } 10166 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 10167 err = -errno; 10168 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", 10169 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10170 goto err_out; 10171 } 10172 10173 return &link->link; 10174 err_out: 10175 if (link_fd >= 0) 10176 close(link_fd); 10177 free(link); 10178 return libbpf_err_ptr(err); 10179 } 10180 10181 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) 10182 { 10183 return bpf_program__attach_perf_event_opts(prog, pfd, NULL); 10184 } 10185
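/*
 * Illustrative usage sketch (editor's example, not part of libbpf): opening
 * a software perf event on CPU 0 and attaching a loaded program to it. The
 * attr values and "prog" are assumptions for the example.
 *
 *	struct perf_event_attr attr = {
 *		.size = sizeof(attr),
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *		.freq = 1,
 *		.sample_freq = 99,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1,
 *			  PERF_FLAG_FD_CLOEXEC);	// pid=-1, cpu=0, group_fd=-1
 *	struct bpf_link *link = bpf_program__attach_perf_event(prog, pfd);
 */
10186 /* 10187 * this function is expected to parse an integer in the range of [0, 2^31-1] from 10188 * the given file using scanf format string fmt. If the actual parsed value is 10189 * negative, the result might be indistinguishable from an error 10190 */ 10191 static int parse_uint_from_file(const char *file, const char *fmt) 10192 { 10193 char buf[STRERR_BUFSIZE]; 10194 int err, ret; 10195 FILE *f; 10196 10197 f = fopen(file, "re"); 10198 if (!f) { 10199 err = -errno; 10200 pr_debug("failed to open '%s': %s\n", file, 10201 libbpf_strerror_r(err, buf, sizeof(buf))); 10202 return err; 10203 } 10204 err = fscanf(f, fmt, &ret); 10205 if (err != 1) { 10206 err = err == EOF ?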
-EIO : -errno; 10207 pr_debug("failed to parse '%s': %s\n", file, 10208 libbpf_strerror_r(err, buf, sizeof(buf))); 10209 fclose(f); 10210 return err; 10211 } 10212 fclose(f); 10213 return ret; 10214 } 10215 10216 static int determine_kprobe_perf_type(void) 10217 { 10218 const char *file = "/sys/bus/event_source/devices/kprobe/type"; 10219 10220 return parse_uint_from_file(file, "%d\n"); 10221 } 10222 10223 static int determine_uprobe_perf_type(void) 10224 { 10225 const char *file = "/sys/bus/event_source/devices/uprobe/type"; 10226 10227 return parse_uint_from_file(file, "%d\n"); 10228 } 10229 10230 static int determine_kprobe_retprobe_bit(void) 10231 { 10232 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; 10233 10234 return parse_uint_from_file(file, "config:%d\n"); 10235 } 10236 10237 static int determine_uprobe_retprobe_bit(void) 10238 { 10239 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; 10240 10241 return parse_uint_from_file(file, "config:%d\n"); 10242 } 10243 10244 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32 10245 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 10246 10247 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, 10248 uint64_t offset, int pid, size_t ref_ctr_off) 10249 { 10250 const size_t attr_sz = sizeof(struct perf_event_attr); 10251 struct perf_event_attr attr; 10252 char errmsg[STRERR_BUFSIZE]; 10253 int type, pfd; 10254 10255 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) 10256 return -EINVAL; 10257 10258 memset(&attr, 0, attr_sz); 10259 10260 type = uprobe ? determine_uprobe_perf_type() 10261 : determine_kprobe_perf_type(); 10262 if (type < 0) { 10263 pr_warn("failed to determine %s perf type: %s\n", 10264 uprobe ? "uprobe" : "kprobe", 10265 libbpf_strerror_r(type, errmsg, sizeof(errmsg))); 10266 return type; 10267 } 10268 if (retprobe) { 10269 int bit = uprobe ? determine_uprobe_retprobe_bit() 10270 : determine_kprobe_retprobe_bit(); 10271 10272 if (bit < 0) { 10273 pr_warn("failed to determine %s retprobe bit: %s\n", 10274 uprobe ? "uprobe" : "kprobe", 10275 libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); 10276 return bit; 10277 } 10278 attr.config |= 1 << bit; 10279 } 10280 attr.size = attr_sz; 10281 attr.type = type; 10282 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; 10283 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ 10284 attr.config2 = offset; /* kprobe_addr or probe_offset */ 10285 10286 /* pid filter is meaningful only for uprobes */ 10287 pfd = syscall(__NR_perf_event_open, &attr, 10288 pid < 0 ? -1 : pid /* pid */, 10289 pid == -1 ? 0 : -1 /* cpu */, 10290 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 10291 return pfd >= 0 ? pfd : -errno; 10292 } 10293 10294 static int append_to_file(const char *file, const char *fmt, ...) 
10295 { 10296 int fd, n, err = 0; 10297 va_list ap; 10298 char buf[1024]; 10299 10300 va_start(ap, fmt); 10301 n = vsnprintf(buf, sizeof(buf), fmt, ap); 10302 va_end(ap); 10303 10304 if (n < 0 || n >= sizeof(buf)) 10305 return -EINVAL; 10306 10307 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); 10308 if (fd < 0) 10309 return -errno; 10310 10311 if (write(fd, buf, n) < 0) 10312 err = -errno; 10313 10314 close(fd); 10315 return err; 10316 } 10317 10318 #define DEBUGFS "/sys/kernel/debug/tracing" 10319 #define TRACEFS "/sys/kernel/tracing" 10320 10321 static bool use_debugfs(void) 10322 { 10323 static int has_debugfs = -1; 10324 10325 if (has_debugfs < 0) 10326 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0; 10327 10328 return has_debugfs == 1; 10329 } 10330 10331 static const char *tracefs_path(void) 10332 { 10333 return use_debugfs() ? DEBUGFS : TRACEFS; 10334 } 10335 10336 static const char *tracefs_kprobe_events(void) 10337 { 10338 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events"; 10339 } 10340 10341 static const char *tracefs_uprobe_events(void) 10342 { 10343 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events"; 10344 } 10345 10346 static const char *tracefs_available_filter_functions(void) 10347 { 10348 return use_debugfs() ? DEBUGFS"/available_filter_functions" 10349 : TRACEFS"/available_filter_functions"; 10350 } 10351 10352 static const char *tracefs_available_filter_functions_addrs(void) 10353 { 10354 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs" 10355 : TRACEFS"/available_filter_functions_addrs"; 10356 } 10357 10358 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, 10359 const char *kfunc_name, size_t offset) 10360 { 10361 static int index = 0; 10362 int i; 10363 10364 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, 10365 __sync_fetch_and_add(&index, 1)); 10366 10367 /* sanitize kfunc_name in the probe name */ 10368 for (i = 0; buf[i]; i++) { 10369 if (!isalnum(buf[i])) 10370 buf[i] = '_'; 10371 } 10372 } 10373 10374 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, 10375 const char *kfunc_name, size_t offset) 10376 { 10377 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx", 10378 retprobe ? 'r' : 'p', 10379 retprobe ? "kretprobes" : "kprobes", 10380 probe_name, kfunc_name, offset); 10381 } 10382 10383 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) 10384 { 10385 return append_to_file(tracefs_kprobe_events(), "-:%s/%s", 10386 retprobe ? "kretprobes" : "kprobes", probe_name); 10387 } 10388
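/*
 * Illustrative sketch (editor's example, not part of libbpf): for a
 * hypothetical probe on do_unlinkat created by a process with PID 1234, the
 * two helpers above append lines like these to [debugfs|tracefs]/kprobe_events:
 *
 *	p:kprobes/libbpf_1234_do_unlinkat_0x0_0 do_unlinkat+0x0
 *	-:kprobes/libbpf_1234_do_unlinkat_0x0_0
 */
10389 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) 10390 { 10391 char file[256]; 10392 10393 snprintf(file, sizeof(file), "%s/events/%s/%s/id", 10394 tracefs_path(), retprobe ?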
"kretprobes" : "kprobes", probe_name); 10395 10396 return parse_uint_from_file(file, "%d\n"); 10397 } 10398 10399 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, 10400 const char *kfunc_name, size_t offset, int pid) 10401 { 10402 const size_t attr_sz = sizeof(struct perf_event_attr); 10403 struct perf_event_attr attr; 10404 char errmsg[STRERR_BUFSIZE]; 10405 int type, pfd, err; 10406 10407 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); 10408 if (err < 0) { 10409 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", 10410 kfunc_name, offset, 10411 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10412 return err; 10413 } 10414 type = determine_kprobe_perf_type_legacy(probe_name, retprobe); 10415 if (type < 0) { 10416 err = type; 10417 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", 10418 kfunc_name, offset, 10419 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10420 goto err_clean_legacy; 10421 } 10422 10423 memset(&attr, 0, attr_sz); 10424 attr.size = attr_sz; 10425 attr.config = type; 10426 attr.type = PERF_TYPE_TRACEPOINT; 10427 10428 pfd = syscall(__NR_perf_event_open, &attr, 10429 pid < 0 ? -1 : pid, /* pid */ 10430 pid == -1 ? 0 : -1, /* cpu */ 10431 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 10432 if (pfd < 0) { 10433 err = -errno; 10434 pr_warn("legacy kprobe perf_event_open() failed: %s\n", 10435 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10436 goto err_clean_legacy; 10437 } 10438 return pfd; 10439 10440 err_clean_legacy: 10441 /* Clear the newly added legacy kprobe_event */ 10442 remove_kprobe_event_legacy(probe_name, retprobe); 10443 return err; 10444 } 10445 10446 static const char *arch_specific_syscall_pfx(void) 10447 { 10448 #if defined(__x86_64__) 10449 return "x64"; 10450 #elif defined(__i386__) 10451 return "ia32"; 10452 #elif defined(__s390x__) 10453 return "s390x"; 10454 #elif defined(__s390__) 10455 return "s390"; 10456 #elif defined(__arm__) 10457 return "arm"; 10458 #elif defined(__aarch64__) 10459 return "arm64"; 10460 #elif defined(__mips__) 10461 return "mips"; 10462 #elif defined(__riscv) 10463 return "riscv"; 10464 #elif defined(__powerpc__) 10465 return "powerpc"; 10466 #elif defined(__powerpc64__) 10467 return "powerpc64"; 10468 #else 10469 return NULL; 10470 #endif 10471 } 10472 10473 static int probe_kern_syscall_wrapper(void) 10474 { 10475 char syscall_name[64]; 10476 const char *ksys_pfx; 10477 10478 ksys_pfx = arch_specific_syscall_pfx(); 10479 if (!ksys_pfx) 10480 return 0; 10481 10482 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx); 10483 10484 if (determine_kprobe_perf_type() >= 0) { 10485 int pfd; 10486 10487 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0); 10488 if (pfd >= 0) 10489 close(pfd); 10490 10491 return pfd >= 0 ? 
1 : 0; 10492 } else { /* legacy mode */ 10493 char probe_name[128]; 10494 10495 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); 10496 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) 10497 return 0; 10498 10499 (void)remove_kprobe_event_legacy(probe_name, false); 10500 return 1; 10501 } 10502 } 10503 10504 struct bpf_link * 10505 bpf_program__attach_kprobe_opts(const struct bpf_program *prog, 10506 const char *func_name, 10507 const struct bpf_kprobe_opts *opts) 10508 { 10509 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); 10510 enum probe_attach_mode attach_mode; 10511 char errmsg[STRERR_BUFSIZE]; 10512 char *legacy_probe = NULL; 10513 struct bpf_link *link; 10514 size_t offset; 10515 bool retprobe, legacy; 10516 int pfd, err; 10517 10518 if (!OPTS_VALID(opts, bpf_kprobe_opts)) 10519 return libbpf_err_ptr(-EINVAL); 10520 10521 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); 10522 retprobe = OPTS_GET(opts, retprobe, false); 10523 offset = OPTS_GET(opts, offset, 0); 10524 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 10525 10526 legacy = determine_kprobe_perf_type() < 0; 10527 switch (attach_mode) { 10528 case PROBE_ATTACH_MODE_LEGACY: 10529 legacy = true; 10530 pe_opts.force_ioctl_attach = true; 10531 break; 10532 case PROBE_ATTACH_MODE_PERF: 10533 if (legacy) 10534 return libbpf_err_ptr(-ENOTSUP); 10535 pe_opts.force_ioctl_attach = true; 10536 break; 10537 case PROBE_ATTACH_MODE_LINK: 10538 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK)) 10539 return libbpf_err_ptr(-ENOTSUP); 10540 break; 10541 case PROBE_ATTACH_MODE_DEFAULT: 10542 break; 10543 default: 10544 return libbpf_err_ptr(-EINVAL); 10545 } 10546 10547 if (!legacy) { 10548 pfd = perf_event_open_probe(false /* uprobe */, retprobe, 10549 func_name, offset, 10550 -1 /* pid */, 0 /* ref_ctr_off */); 10551 } else { 10552 char probe_name[256]; 10553 10554 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), 10555 func_name, offset); 10556 10557 legacy_probe = strdup(probe_name); 10558 if (!legacy_probe) 10559 return libbpf_err_ptr(-ENOMEM); 10560 10561 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, 10562 offset, -1 /* pid */); 10563 } 10564 if (pfd < 0) { 10565 err = -errno; 10566 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", 10567 prog->name, retprobe ? "kretprobe" : "kprobe", 10568 func_name, offset, 10569 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10570 goto err_out; 10571 } 10572 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); 10573 err = libbpf_get_error(link); 10574 if (err) { 10575 close(pfd); 10576 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", 10577 prog->name, retprobe ? 
"kretprobe" : "kprobe", 10578 func_name, offset, 10579 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10580 goto err_clean_legacy; 10581 } 10582 if (legacy) { 10583 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 10584 10585 perf_link->legacy_probe_name = legacy_probe; 10586 perf_link->legacy_is_kprobe = true; 10587 perf_link->legacy_is_retprobe = retprobe; 10588 } 10589 10590 return link; 10591 10592 err_clean_legacy: 10593 if (legacy) 10594 remove_kprobe_event_legacy(legacy_probe, retprobe); 10595 err_out: 10596 free(legacy_probe); 10597 return libbpf_err_ptr(err); 10598 } 10599 10600 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, 10601 bool retprobe, 10602 const char *func_name) 10603 { 10604 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, 10605 .retprobe = retprobe, 10606 ); 10607 10608 return bpf_program__attach_kprobe_opts(prog, func_name, &opts); 10609 } 10610 10611 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, 10612 const char *syscall_name, 10613 const struct bpf_ksyscall_opts *opts) 10614 { 10615 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); 10616 char func_name[128]; 10617 10618 if (!OPTS_VALID(opts, bpf_ksyscall_opts)) 10619 return libbpf_err_ptr(-EINVAL); 10620 10621 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) { 10622 /* arch_specific_syscall_pfx() should never return NULL here 10623 * because it is guarded by kernel_supports(). However, since 10624 * compiler does not know that we have an explicit conditional 10625 * as well. 10626 */ 10627 snprintf(func_name, sizeof(func_name), "__%s_sys_%s", 10628 arch_specific_syscall_pfx() ? : "", syscall_name); 10629 } else { 10630 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name); 10631 } 10632 10633 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false); 10634 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 10635 10636 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts); 10637 } 10638 10639 /* Adapted from perf/util/string.c */ 10640 bool glob_match(const char *str, const char *pat) 10641 { 10642 while (*str && *pat && *pat != '*') { 10643 if (*pat == '?') { /* Matches any single character */ 10644 str++; 10645 pat++; 10646 continue; 10647 } 10648 if (*str != *pat) 10649 return false; 10650 str++; 10651 pat++; 10652 } 10653 /* Check wild card */ 10654 if (*pat == '*') { 10655 while (*pat == '*') 10656 pat++; 10657 if (!*pat) /* Tail wild card matches all */ 10658 return true; 10659 while (*str) 10660 if (glob_match(str++, pat)) 10661 return true; 10662 } 10663 return !*str && !*pat; 10664 } 10665 10666 struct kprobe_multi_resolve { 10667 const char *pattern; 10668 unsigned long *addrs; 10669 size_t cap; 10670 size_t cnt; 10671 }; 10672 10673 struct avail_kallsyms_data { 10674 char **syms; 10675 size_t cnt; 10676 struct kprobe_multi_resolve *res; 10677 }; 10678 10679 static int avail_func_cmp(const void *a, const void *b) 10680 { 10681 return strcmp(*(const char **)a, *(const char **)b); 10682 } 10683 10684 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type, 10685 const char *sym_name, void *ctx) 10686 { 10687 struct avail_kallsyms_data *data = ctx; 10688 struct kprobe_multi_resolve *res = data->res; 10689 int err; 10690 10691 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp)) 10692 return 0; 10693 10694 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1); 10695 if (err) 10696 return err; 10697 10698 
res->addrs[res->cnt++] = (unsigned long)sym_addr; 10699 return 0; 10700 } 10701 10702 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res) 10703 { 10704 const char *available_functions_file = tracefs_available_filter_functions(); 10705 struct avail_kallsyms_data data; 10706 char sym_name[500]; 10707 FILE *f; 10708 int err = 0, ret, i; 10709 char **syms = NULL; 10710 size_t cap = 0, cnt = 0; 10711 10712 f = fopen(available_functions_file, "re"); 10713 if (!f) { 10714 err = -errno; 10715 pr_warn("failed to open %s: %d\n", available_functions_file, err); 10716 return err; 10717 } 10718 10719 while (true) { 10720 char *name; 10721 10722 ret = fscanf(f, "%499s%*[^\n]\n", sym_name); 10723 if (ret == EOF && feof(f)) 10724 break; 10725 10726 if (ret != 1) { 10727 pr_warn("failed to parse available_filter_functions entry: %d\n", ret); 10728 err = -EINVAL; 10729 goto cleanup; 10730 } 10731 10732 if (!glob_match(sym_name, res->pattern)) 10733 continue; 10734 10735 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1); 10736 if (err) 10737 goto cleanup; 10738 10739 name = strdup(sym_name); 10740 if (!name) { 10741 err = -errno; 10742 goto cleanup; 10743 } 10744 10745 syms[cnt++] = name; 10746 } 10747 10748 /* no entries found, bail out */ 10749 if (cnt == 0) { 10750 err = -ENOENT; 10751 goto cleanup; 10752 } 10753 10754 /* sort available functions */ 10755 qsort(syms, cnt, sizeof(*syms), avail_func_cmp); 10756 10757 data.syms = syms; 10758 data.res = res; 10759 data.cnt = cnt; 10760 libbpf_kallsyms_parse(avail_kallsyms_cb, &data); 10761 10762 if (res->cnt == 0) 10763 err = -ENOENT; 10764 10765 cleanup: 10766 for (i = 0; i < cnt; i++) 10767 free((char *)syms[i]); 10768 free(syms); 10769 10770 fclose(f); 10771 return err; 10772 } 10773 10774 static bool has_available_filter_functions_addrs(void) 10775 { 10776 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1; 10777 } 10778 10779 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res) 10780 { 10781 const char *available_path = tracefs_available_filter_functions_addrs(); 10782 char sym_name[500]; 10783 FILE *f; 10784 int ret, err = 0; 10785 unsigned long long sym_addr; 10786 10787 f = fopen(available_path, "re"); 10788 if (!f) { 10789 err = -errno; 10790 pr_warn("failed to open %s: %d\n", available_path, err); 10791 return err; 10792 } 10793 10794 while (true) { 10795 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name); 10796 if (ret == EOF && feof(f)) 10797 break; 10798 10799 if (ret != 2) { 10800 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n", 10801 ret); 10802 err = -EINVAL; 10803 goto cleanup; 10804 } 10805 10806 if (!glob_match(sym_name, res->pattern)) 10807 continue; 10808 10809 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, 10810 sizeof(*res->addrs), res->cnt + 1); 10811 if (err) 10812 goto cleanup; 10813 10814 res->addrs[res->cnt++] = (unsigned long)sym_addr; 10815 } 10816 10817 if (res->cnt == 0) 10818 err = -ENOENT; 10819 10820 cleanup: 10821 fclose(f); 10822 return err; 10823 } 10824
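/*
 * Illustrative usage sketch (editor's example, not part of libbpf): the
 * function below can attach one program to every kernel function matching a
 * glob pattern in a single operation; "prog" and the pattern are hypothetical.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", NULL);
 *	if (!link)
 *		fprintf(stderr, "attach failed: %d\n", -errno);
 */
10825 struct bpf_link * 10826 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, 10827 const char *pattern, 10828 const struct bpf_kprobe_multi_opts *opts) 10829 { 10830 LIBBPF_OPTS(bpf_link_create_opts, lopts); 10831 struct kprobe_multi_resolve res = { 10832 .pattern = pattern, 10833 }; 10834 struct bpf_link *link = NULL; 10835 char errmsg[STRERR_BUFSIZE]; 10836 const unsigned long *addrs; 10837 int err, link_fd, prog_fd;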
10838 const __u64 *cookies; 10839 const char **syms; 10840 bool retprobe; 10841 size_t cnt; 10842 10843 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) 10844 return libbpf_err_ptr(-EINVAL); 10845 10846 syms = OPTS_GET(opts, syms, NULL); 10847 addrs = OPTS_GET(opts, addrs, NULL); 10848 cnt = OPTS_GET(opts, cnt, 0); 10849 cookies = OPTS_GET(opts, cookies, NULL); 10850 10851 if (!pattern && !addrs && !syms) 10852 return libbpf_err_ptr(-EINVAL); 10853 if (pattern && (addrs || syms || cookies || cnt)) 10854 return libbpf_err_ptr(-EINVAL); 10855 if (!pattern && !cnt) 10856 return libbpf_err_ptr(-EINVAL); 10857 if (addrs && syms) 10858 return libbpf_err_ptr(-EINVAL); 10859 10860 if (pattern) { 10861 if (has_available_filter_functions_addrs()) 10862 err = libbpf_available_kprobes_parse(&res); 10863 else 10864 err = libbpf_available_kallsyms_parse(&res); 10865 if (err) 10866 goto error; 10867 addrs = res.addrs; 10868 cnt = res.cnt; 10869 } 10870 10871 retprobe = OPTS_GET(opts, retprobe, false); 10872 10873 lopts.kprobe_multi.syms = syms; 10874 lopts.kprobe_multi.addrs = addrs; 10875 lopts.kprobe_multi.cookies = cookies; 10876 lopts.kprobe_multi.cnt = cnt; 10877 lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0; 10878 10879 link = calloc(1, sizeof(*link)); 10880 if (!link) { 10881 err = -ENOMEM; 10882 goto error; 10883 } 10884 link->detach = &bpf_link__detach_fd; 10885 10886 prog_fd = bpf_program__fd(prog); 10887 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts); 10888 if (link_fd < 0) { 10889 err = -errno; 10890 pr_warn("prog '%s': failed to attach: %s\n", 10891 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 10892 goto error; 10893 } 10894 link->fd = link_fd; 10895 free(res.addrs); 10896 return link; 10897 10898 error: 10899 free(link); 10900 free(res.addrs); 10901 return libbpf_err_ptr(err); 10902 } 10903 10904 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) 10905 { 10906 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); 10907 unsigned long offset = 0; 10908 const char *func_name; 10909 char *func; 10910 int n; 10911 10912 *link = NULL; 10913 10914 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */ 10915 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0) 10916 return 0; 10917 10918 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); 10919 if (opts.retprobe) 10920 func_name = prog->sec_name + sizeof("kretprobe/") - 1; 10921 else 10922 func_name = prog->sec_name + sizeof("kprobe/") - 1; 10923 10924 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); 10925 if (n < 1) { 10926 pr_warn("kprobe name is invalid: %s\n", func_name); 10927 return -EINVAL; 10928 } 10929 if (opts.retprobe && offset != 0) { 10930 free(func); 10931 pr_warn("kretprobes do not support offset specification\n"); 10932 return -EINVAL; 10933 } 10934 10935 opts.offset = offset; 10936 *link = bpf_program__attach_kprobe_opts(prog, func, &opts); 10937 free(func); 10938 return libbpf_get_error(*link); 10939 } 10940 10941 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link) 10942 { 10943 LIBBPF_OPTS(bpf_ksyscall_opts, opts); 10944 const char *syscall_name; 10945 10946 *link = NULL; 10947 10948 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */ 10949 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0) 10950 return 0; 10951 10952 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
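	/* strip "k[ret]syscall/" prefix to obtain the syscall name */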
10953 if (opts.retprobe) 10954 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1; 10955 else 10956 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1; 10957 10958 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts); 10959 return *link ? 0 : -errno; 10960 } 10961 10962 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) 10963 { 10964 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); 10965 const char *spec; 10966 char *pattern; 10967 int n; 10968 10969 *link = NULL; 10970 10971 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */ 10972 if (strcmp(prog->sec_name, "kprobe.multi") == 0 || 10973 strcmp(prog->sec_name, "kretprobe.multi") == 0) 10974 return 0; 10975 10976 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); 10977 if (opts.retprobe) 10978 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; 10979 else 10980 spec = prog->sec_name + sizeof("kprobe.multi/") - 1; 10981 10982 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); 10983 if (n < 1) { 10984 pr_warn("kprobe multi pattern is invalid: %s\n", spec); 10985 return -EINVAL; 10986 } 10987 10988 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); 10989 free(pattern); 10990 return libbpf_get_error(*link); 10991 } 10992 10993 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) 10994 { 10995 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; 10996 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); 10997 int n, ret = -EINVAL; 10998 10999 *link = NULL; 11000 11001 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%ms", 11002 &probe_type, &binary_path, &func_name); 11003 switch (n) { 11004 case 1: 11005 /* handle SEC("u[ret]probe.multi") - format is valid, but auto-attach is impossible. */ 11006 ret = 0; 11007 break; 11008 case 3: 11009 opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0; 11010 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); 11011 ret = libbpf_get_error(*link); 11012 break; 11013 default: 11014 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, 11015 prog->sec_name); 11016 break; 11017 } 11018 free(probe_type); 11019 free(binary_path); 11020 free(func_name); 11021 return ret; 11022 } 11023 11024 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, 11025 const char *binary_path, uint64_t offset) 11026 { 11027 int i; 11028 11029 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); 11030 11031 /* sanitize binary_path in the probe name */ 11032 for (i = 0; buf[i]; i++) { 11033 if (!isalnum(buf[i])) 11034 buf[i] = '_'; 11035 } 11036 } 11037 11038 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, 11039 const char *binary_path, size_t offset) 11040 { 11041 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx", 11042 retprobe ? 'r' : 'p', 11043 retprobe ? "uretprobes" : "uprobes", 11044 probe_name, binary_path, offset); 11045 } 11046 11047 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) 11048 { 11049 return append_to_file(tracefs_uprobe_events(), "-:%s/%s", 11050 retprobe ? "uretprobes" : "uprobes", probe_name); 11051 } 11052 11053 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) 11054 { 11055 char file[512]; 11056 11057 snprintf(file, sizeof(file), "%s/events/%s/%s/id", 11058 tracefs_path(), retprobe ?
"uretprobes" : "uprobes", probe_name); 11059 11060 return parse_uint_from_file(file, "%d\n"); 11061 } 11062 11063 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, 11064 const char *binary_path, size_t offset, int pid) 11065 { 11066 const size_t attr_sz = sizeof(struct perf_event_attr); 11067 struct perf_event_attr attr; 11068 int type, pfd, err; 11069 11070 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); 11071 if (err < 0) { 11072 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", 11073 binary_path, (size_t)offset, err); 11074 return err; 11075 } 11076 type = determine_uprobe_perf_type_legacy(probe_name, retprobe); 11077 if (type < 0) { 11078 err = type; 11079 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", 11080 binary_path, offset, err); 11081 goto err_clean_legacy; 11082 } 11083 11084 memset(&attr, 0, attr_sz); 11085 attr.size = attr_sz; 11086 attr.config = type; 11087 attr.type = PERF_TYPE_TRACEPOINT; 11088 11089 pfd = syscall(__NR_perf_event_open, &attr, 11090 pid < 0 ? -1 : pid, /* pid */ 11091 pid == -1 ? 0 : -1, /* cpu */ 11092 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 11093 if (pfd < 0) { 11094 err = -errno; 11095 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); 11096 goto err_clean_legacy; 11097 } 11098 return pfd; 11099 11100 err_clean_legacy: 11101 /* Clear the newly added legacy uprobe_event */ 11102 remove_uprobe_event_legacy(probe_name, retprobe); 11103 return err; 11104 } 11105 11106 /* Find offset of function name in archive specified by path. Currently 11107 * supported are .zip files that do not compress their contents, as used on 11108 * Android in the form of APKs, for example. "file_name" is the name of the ELF 11109 * file inside the archive. "func_name" matches symbol name or name@@LIB for 11110 * library functions. 
11111 * 11112 * An overview of the APK format is provided here: 11113 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents 11114 */ 11115 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name, 11116 const char *func_name) 11117 { 11118 struct zip_archive *archive; 11119 struct zip_entry entry; 11120 long ret; 11121 Elf *elf; 11122 11123 archive = zip_archive_open(archive_path); 11124 if (IS_ERR(archive)) { 11125 ret = PTR_ERR(archive); 11126 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret); 11127 return ret; 11128 } 11129 11130 ret = zip_archive_find_entry(archive, file_name, &entry); 11131 if (ret) { 11132 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name, 11133 archive_path, ret); 11134 goto out; 11135 } 11136 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path, 11137 (unsigned long)entry.data_offset); 11138 11139 if (entry.compression) { 11140 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name, 11141 archive_path); 11142 ret = -LIBBPF_ERRNO__FORMAT; 11143 goto out; 11144 } 11145 11146 elf = elf_memory((void *)entry.data, entry.data_length); 11147 if (!elf) { 11148 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path, 11149 elf_errmsg(-1)); 11150 ret = -LIBBPF_ERRNO__LIBELF; 11151 goto out; 11152 } 11153 11154 ret = elf_find_func_offset(elf, file_name, func_name); 11155 if (ret > 0) { 11156 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n", 11157 func_name, file_name, archive_path, entry.data_offset, ret, 11158 ret + entry.data_offset); 11159 ret += entry.data_offset; 11160 } 11161 elf_end(elf); 11162 11163 out: 11164 zip_archive_close(archive); 11165 return ret; 11166 } 11167 11168 static const char *arch_specific_lib_paths(void) 11169 { 11170 /* 11171 * Based on https://packages.debian.org/sid/libc6. 11172 * 11173 * Assume that the traced program is built for the same architecture 11174 * as libbpf, which should cover the vast majority of cases. 11175 */ 11176 #if defined(__x86_64__) 11177 return "/lib/x86_64-linux-gnu"; 11178 #elif defined(__i386__) 11179 return "/lib/i386-linux-gnu"; 11180 #elif defined(__s390x__) 11181 return "/lib/s390x-linux-gnu"; 11182 #elif defined(__s390__) 11183 return "/lib/s390-linux-gnu"; 11184 #elif defined(__arm__) && defined(__SOFTFP__) 11185 return "/lib/arm-linux-gnueabi"; 11186 #elif defined(__arm__) && !defined(__SOFTFP__) 11187 return "/lib/arm-linux-gnueabihf"; 11188 #elif defined(__aarch64__) 11189 return "/lib/aarch64-linux-gnu"; 11190 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64 11191 return "/lib/mips64el-linux-gnuabi64"; 11192 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32 11193 return "/lib/mipsel-linux-gnu"; 11194 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 11195 return "/lib/powerpc64le-linux-gnu"; 11196 #elif defined(__sparc__) && defined(__arch64__) 11197 return "/lib/sparc64-linux-gnu"; 11198 #elif defined(__riscv) && __riscv_xlen == 64 11199 return "/lib/riscv64-linux-gnu"; 11200 #else 11201 return NULL; 11202 #endif 11203 } 11204 11205 /* Get full path to program/shared library.
*/ 11206 static int resolve_full_path(const char *file, char *result, size_t result_sz) 11207 { 11208 const char *search_paths[3] = {}; 11209 int i, perm; 11210 11211 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) { 11212 search_paths[0] = getenv("LD_LIBRARY_PATH"); 11213 search_paths[1] = "/usr/lib64:/usr/lib"; 11214 search_paths[2] = arch_specific_lib_paths(); 11215 perm = R_OK; 11216 } else { 11217 search_paths[0] = getenv("PATH"); 11218 search_paths[1] = "/usr/bin:/usr/sbin"; 11219 perm = R_OK | X_OK; 11220 } 11221 11222 for (i = 0; i < ARRAY_SIZE(search_paths); i++) { 11223 const char *s; 11224 11225 if (!search_paths[i]) 11226 continue; 11227 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) { 11228 char *next_path; 11229 int seg_len; 11230 11231 if (s[0] == ':') 11232 s++; 11233 next_path = strchr(s, ':'); 11234 seg_len = next_path ? next_path - s : strlen(s); 11235 if (!seg_len) 11236 continue; 11237 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file); 11238 /* ensure it has required permissions */ 11239 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0) 11240 continue; 11241 pr_debug("resolved '%s' to '%s'\n", file, result); 11242 return 0; 11243 } 11244 } 11245 return -ENOENT; 11246 } 11247 11248 struct bpf_link * 11249 bpf_program__attach_uprobe_multi(const struct bpf_program *prog, 11250 pid_t pid, 11251 const char *path, 11252 const char *func_pattern, 11253 const struct bpf_uprobe_multi_opts *opts) 11254 { 11255 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; 11256 LIBBPF_OPTS(bpf_link_create_opts, lopts); 11257 unsigned long *resolved_offsets = NULL; 11258 int err = 0, link_fd, prog_fd; 11259 struct bpf_link *link = NULL; 11260 char errmsg[STRERR_BUFSIZE]; 11261 char full_path[PATH_MAX]; 11262 const __u64 *cookies; 11263 const char **syms; 11264 size_t cnt; 11265 11266 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) 11267 return libbpf_err_ptr(-EINVAL); 11268 11269 syms = OPTS_GET(opts, syms, NULL); 11270 offsets = OPTS_GET(opts, offsets, NULL); 11271 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); 11272 cookies = OPTS_GET(opts, cookies, NULL); 11273 cnt = OPTS_GET(opts, cnt, 0); 11274 11275 /* 11276 * User can specify 2 mutually exclusive sets of inputs: 11277 * 11278 * 1) use only path/func_pattern/pid arguments 11279 * 11280 * 2) use path/pid with allowed combinations of: 11281 * syms/offsets/ref_ctr_offsets/cookies/cnt 11282 * 11283 * - syms and offsets are mutually exclusive 11284 * - ref_ctr_offsets and cookies are optional 11285 * 11286 * Any other usage results in error.
11287 */ 11288 11289 if (!path) 11290 return libbpf_err_ptr(-EINVAL); 11291 if (!func_pattern && cnt == 0) 11292 return libbpf_err_ptr(-EINVAL); 11293 11294 if (func_pattern) { 11295 if (syms || offsets || ref_ctr_offsets || cookies || cnt) 11296 return libbpf_err_ptr(-EINVAL); 11297 } else { 11298 if (!!syms == !!offsets) 11299 return libbpf_err_ptr(-EINVAL); 11300 } 11301 11302 if (func_pattern) { 11303 if (!strchr(path, '/')) { 11304 err = resolve_full_path(path, full_path, sizeof(full_path)); 11305 if (err) { 11306 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", 11307 prog->name, path, err); 11308 return libbpf_err_ptr(err); 11309 } 11310 path = full_path; 11311 } 11312 11313 err = elf_resolve_pattern_offsets(path, func_pattern, 11314 &resolved_offsets, &cnt); 11315 if (err < 0) 11316 return libbpf_err_ptr(err); 11317 offsets = resolved_offsets; 11318 } else if (syms) { 11319 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets); 11320 if (err < 0) 11321 return libbpf_err_ptr(err); 11322 offsets = resolved_offsets; 11323 } 11324 11325 lopts.uprobe_multi.path = path; 11326 lopts.uprobe_multi.offsets = offsets; 11327 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; 11328 lopts.uprobe_multi.cookies = cookies; 11329 lopts.uprobe_multi.cnt = cnt; 11330 lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0; 11331 11332 if (pid == 0) 11333 pid = getpid(); 11334 if (pid > 0) 11335 lopts.uprobe_multi.pid = pid; 11336 11337 link = calloc(1, sizeof(*link)); 11338 if (!link) { 11339 err = -ENOMEM; 11340 goto error; 11341 } 11342 link->detach = &bpf_link__detach_fd; 11343 11344 prog_fd = bpf_program__fd(prog); 11345 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts); 11346 if (link_fd < 0) { 11347 err = -errno; 11348 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", 11349 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 11350 goto error; 11351 } 11352 link->fd = link_fd; 11353 free(resolved_offsets); 11354 return link; 11355 11356 error: 11357 free(resolved_offsets); 11358 free(link); 11359 return libbpf_err_ptr(err); 11360 } 11361 11362 LIBBPF_API struct bpf_link * 11363 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, 11364 const char *binary_path, size_t func_offset, 11365 const struct bpf_uprobe_opts *opts) 11366 { 11367 const char *archive_path = NULL, *archive_sep = NULL; 11368 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; 11369 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); 11370 enum probe_attach_mode attach_mode; 11371 char full_path[PATH_MAX]; 11372 struct bpf_link *link; 11373 size_t ref_ctr_off; 11374 int pfd, err; 11375 bool retprobe, legacy; 11376 const char *func_name; 11377 11378 if (!OPTS_VALID(opts, bpf_uprobe_opts)) 11379 return libbpf_err_ptr(-EINVAL); 11380 11381 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT); 11382 retprobe = OPTS_GET(opts, retprobe, false); 11383 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); 11384 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 11385 11386 if (!binary_path) 11387 return libbpf_err_ptr(-EINVAL); 11388 11389 /* Check if "binary_path" refers to an archive. 
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts)
{
	const char *archive_path = NULL, *archive_sep = NULL;
	char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	enum probe_attach_mode attach_mode;
	char full_path[PATH_MAX];
	struct bpf_link *link;
	size_t ref_ctr_off;
	int pfd, err;
	bool retprobe, legacy;
	const char *func_name;

	if (!OPTS_VALID(opts, bpf_uprobe_opts))
		return libbpf_err_ptr(-EINVAL);

	attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
	retprobe = OPTS_GET(opts, retprobe, false);
	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	if (!binary_path)
		return libbpf_err_ptr(-EINVAL);

	/* Check if "binary_path" refers to an archive. */
	archive_sep = strstr(binary_path, "!/");
	if (archive_sep) {
		full_path[0] = '\0';
		libbpf_strlcpy(full_path, binary_path,
			       min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
		archive_path = full_path;
		binary_path = archive_sep + 2;
	} else if (!strchr(binary_path, '/')) {
		err = resolve_full_path(binary_path, full_path, sizeof(full_path));
		if (err) {
			pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
				prog->name, binary_path, err);
			return libbpf_err_ptr(err);
		}
		binary_path = full_path;
	}
	func_name = OPTS_GET(opts, func_name, NULL);
	if (func_name) {
		long sym_off;

		if (archive_path) {
			sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
								    func_name);
			binary_path = archive_path;
		} else {
			sym_off = elf_find_func_offset_from_file(binary_path, func_name);
		}
		if (sym_off < 0)
			return libbpf_err_ptr(sym_off);
		func_offset += sym_off;
	}

	legacy = determine_uprobe_perf_type() < 0;
	switch (attach_mode) {
	case PROBE_ATTACH_MODE_LEGACY:
		legacy = true;
		pe_opts.force_ioctl_attach = true;
		break;
	case PROBE_ATTACH_MODE_PERF:
		if (legacy)
			return libbpf_err_ptr(-ENOTSUP);
		pe_opts.force_ioctl_attach = true;
		break;
	case PROBE_ATTACH_MODE_LINK:
		if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
			return libbpf_err_ptr(-ENOTSUP);
		break;
	case PROBE_ATTACH_MODE_DEFAULT:
		break;
	default:
		return libbpf_err_ptr(-EINVAL);
	}

	if (!legacy) {
		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
					    func_offset, pid, ref_ctr_off);
	} else {
		char probe_name[PATH_MAX + 64];

		if (ref_ctr_off)
			return libbpf_err_ptr(-EINVAL);

		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
					     binary_path, func_offset);

		legacy_probe = strdup(probe_name);
		if (!legacy_probe)
			return libbpf_err_ptr(-ENOMEM);

		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
						    binary_path, func_offset, pid);
	}
	if (pfd < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}

	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_clean_legacy;
	}
	if (legacy) {
		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);

		perf_link->legacy_probe_name = legacy_probe;
		perf_link->legacy_is_kprobe = false;
		perf_link->legacy_is_retprobe = retprobe;
	}
	return link;

err_clean_legacy:
	if (legacy)
		remove_uprobe_event_legacy(legacy_probe, retprobe);
err_out:
	free(legacy_probe);
	return libbpf_err_ptr(err);
}

/* Format of u[ret]probe section definition supporting auto-attach:
 * u[ret]probe/binary:function[+offset]
 *
 * binary can be an absolute/relative path or a filename; the latter is resolved to a
 * full binary path via bpf_program__attach_uprobe_opts.
 *
 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
 * specified (and auto-attach is not possible) or the above format is specified for
 * auto-attach.
 */
static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
	int n, ret = -EINVAL;
	long offset = 0;

	*link = NULL;

	n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li",
		   &probe_type, &binary_path, &func_name, &offset);
	switch (n) {
	case 1:
		/* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
		ret = 0;
		break;
	case 2:
		pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
			prog->name, prog->sec_name);
		break;
	case 3:
	case 4:
		opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
				strcmp(probe_type, "uretprobe.s") == 0;
		if (opts.retprobe && offset != 0) {
			pr_warn("prog '%s': uretprobes do not support offset specification\n",
				prog->name);
			break;
		}
		opts.func_name = func_name;
		*link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
		ret = libbpf_get_error(*link);
		break;
	default:
		pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
			prog->sec_name);
		break;
	}
	free(probe_type);
	free(binary_path);
	free(func_name);

	return ret;
}

struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
					    bool retprobe, pid_t pid,
					    const char *binary_path,
					    size_t func_offset)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);

	return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
}
struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
					  pid_t pid, const char *binary_path,
					  const char *usdt_provider, const char *usdt_name,
					  const struct bpf_usdt_opts *opts)
{
	char resolved_path[512];
	struct bpf_object *obj = prog->obj;
	struct bpf_link *link;
	__u64 usdt_cookie;
	int err;

	if (!OPTS_VALID(opts, bpf_usdt_opts))
		return libbpf_err_ptr(-EINVAL);

	if (bpf_program__fd(prog) < 0) {
		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (!binary_path)
		return libbpf_err_ptr(-EINVAL);

	if (!strchr(binary_path, '/')) {
		err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
		if (err) {
			pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
				prog->name, binary_path, err);
			return libbpf_err_ptr(err);
		}
		binary_path = resolved_path;
	}

	/* USDT manager is instantiated lazily on first USDT attach. It will
	 * be destroyed together with BPF object in bpf_object__close().
	 */
	if (IS_ERR(obj->usdt_man))
		return libbpf_ptr(obj->usdt_man);
	if (!obj->usdt_man) {
		obj->usdt_man = usdt_manager_new(obj);
		if (IS_ERR(obj->usdt_man))
			return libbpf_ptr(obj->usdt_man);
	}

	usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
	link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
					usdt_provider, usdt_name, usdt_cookie);
	err = libbpf_get_error(link);
	if (err)
		return libbpf_err_ptr(err);
	return link;
}
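/* Usage sketch (editor's illustration, not part of libbpf itself): attach to
 * a USDT probe. Binary, provider and probe name are hypothetical; the BPF
 * side would use SEC("usdt") together with the usdt.bpf.h helpers.
 *
 *	LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
 *	struct bpf_link *link;
 *
 *	// pid -1 = any process; the binary must actually carry the USDT notes
 *	link = bpf_program__attach_usdt(prog, -1, "/usr/local/bin/myapp",
 *					"myapp", "query__start", &opts);
 */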
static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	char *path = NULL, *provider = NULL, *name = NULL;
	const char *sec_name;
	int n, err;

	sec_name = bpf_program__section_name(prog);
	if (strcmp(sec_name, "usdt") == 0) {
		/* no auto-attach for just SEC("usdt") */
		*link = NULL;
		return 0;
	}

	n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
	if (n != 3) {
		pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
			sec_name);
		err = -EINVAL;
	} else {
		*link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
						 provider, name, NULL);
		err = libbpf_get_error(*link);
	}
	free(path);
	free(provider);
	free(name);
	return err;
}

static int determine_tracepoint_id(const char *tp_category,
				   const char *tp_name)
{
	char file[PATH_MAX];
	int ret;

	ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
		       tracefs_path(), tp_category, tp_name);
	if (ret < 0)
		return -errno;
	if (ret >= sizeof(file)) {
		pr_debug("tracepoint %s/%s path is too long\n",
			 tp_category, tp_name);
		return -E2BIG;
	}
	return parse_uint_from_file(file, "%d\n");
}

static int perf_event_open_tracepoint(const char *tp_category,
				      const char *tp_name)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	struct perf_event_attr attr;
	char errmsg[STRERR_BUFSIZE];
	int tp_id, pfd, err;

	tp_id = determine_tracepoint_id(tp_category, tp_name);
	if (tp_id < 0) {
		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
		return tp_id;
	}

	memset(&attr, 0, attr_sz);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = attr_sz;
	attr.config = tp_id;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
			tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}

struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
						     const char *tp_category,
						     const char *tp_name,
						     const struct bpf_tracepoint_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int pfd, err;

	if (!OPTS_VALID(opts, bpf_tracepoint_opts))
		return libbpf_err_ptr(-EINVAL);

	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	pfd = perf_event_open_tracepoint(tp_category, tp_name);
	if (pfd < 0) {
		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
			prog->name, tp_category, tp_name,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(err);
	}
	return link;
}

struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
}

static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	char *sec_name, *tp_cat, *tp_name;

	*link = NULL;

	/* no auto-attach for SEC("tp") or SEC("tracepoint") */
	if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
		return 0;

	sec_name = strdup(prog->sec_name);
	if (!sec_name)
		return -ENOMEM;

	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
	if (str_has_pfx(prog->sec_name, "tp/"))
		tp_cat = sec_name + sizeof("tp/") - 1;
	else
		tp_cat = sec_name + sizeof("tracepoint/") - 1;
	tp_name = strchr(tp_cat, '/');
	if (!tp_name) {
		free(sec_name);
		return -EINVAL;
	}
	*tp_name = '\0';
	tp_name++;

	*link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
	free(sec_name);
	return libbpf_get_error(*link);
}

struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
						    const char *tp_name)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
			prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link->fd = pfd;
	return link;
}
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	static const char *const prefixes[] = {
		"raw_tp",
		"raw_tracepoint",
		"raw_tp.w",
		"raw_tracepoint.w",
	};
	size_t i;
	const char *tp_name = NULL;

	*link = NULL;

	for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
		size_t pfx_len;

		if (!str_has_pfx(prog->sec_name, prefixes[i]))
			continue;

		pfx_len = strlen(prefixes[i]);
		/* no auto-attach case of, e.g., SEC("raw_tp") */
		if (prog->sec_name[pfx_len] == '\0')
			return 0;

		if (prog->sec_name[pfx_len] != '/')
			continue;

		tp_name = prog->sec_name + pfx_len + 1;
		break;
	}

	if (!tp_name) {
		pr_warn("prog '%s': invalid section name '%s'\n",
			prog->name, prog->sec_name);
		return -EINVAL;
	}

	*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
	return libbpf_get_error(*link);
}

/* Common logic for all BPF program types that attach to a btf_id */
static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
						   const struct bpf_trace_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, pfd;

	if (!OPTS_VALID(opts, bpf_trace_opts))
		return libbpf_err_ptr(-EINVAL);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	/* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
	link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
	pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
	if (pfd < 0) {
		pfd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach: %s\n",
			prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(pfd);
	}
	link->fd = pfd;
	return link;
}

struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog, NULL);
}

struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
						const struct bpf_trace_opts *opts)
{
	return bpf_program__attach_btf_id(prog, opts);
}

struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog, NULL);
}

static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	*link = bpf_program__attach_trace(prog);
	return libbpf_get_error(*link);
}

static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	*link = bpf_program__attach_lsm(prog);
	return libbpf_get_error(*link);
}

static struct bpf_link *
bpf_program_attach_fd(const struct bpf_program *prog,
		      int target_fd, const char *target_name,
		      const struct bpf_link_create_opts *opts)
{
	enum bpf_attach_type attach_type;
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	attach_type = bpf_program__expected_attach_type(prog);
	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to %s: %s\n",
			prog->name, target_name,
			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(link_fd);
	}
	link->fd = link_fd;
	return link;
}

struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
}

struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
{
	return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
}

struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
}

struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
			const struct bpf_tcx_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	__u32 relative_id;
	int relative_fd;

	if (!OPTS_VALID(opts, bpf_tcx_opts))
		return libbpf_err_ptr(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (!ifindex) {
		pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}
	if (relative_fd && relative_id) {
		pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
	link_create_opts.tcx.relative_fd = relative_fd;
	link_create_opts.tcx.relative_id = relative_id;
	link_create_opts.flags = OPTS_GET(opts, flags, 0);

	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
}
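/* Usage sketch (editor's illustration, not part of libbpf itself): attach a
 * SEC("tcx/ingress") program to a network interface; "eth0" and the program
 * handle are assumptions of the example.
 *
 *	#include <net/if.h>	// if_nametoindex()
 *
 *	LIBBPF_OPTS(bpf_tcx_opts, opts);	// no relative_fd/relative_id: default ordering
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_tcx(prog, if_nametoindex("eth0"), &opts);
 */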
struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
					      int target_fd,
					      const char *attach_func_name)
{
	int btf_id;

	if (!!target_fd != !!attach_func_name) {
		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (prog->type != BPF_PROG_TYPE_EXT) {
		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (target_fd) {
		LIBBPF_OPTS(bpf_link_create_opts, target_opts);

		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
		if (btf_id < 0)
			return libbpf_err_ptr(btf_id);

		target_opts.target_btf_id = btf_id;

		return bpf_program_attach_fd(prog, target_fd, "freplace",
					     &target_opts);
	} else {
		/* no target, so use raw_tracepoint_open for compatibility
		 * with old kernels
		 */
		return bpf_program__attach_trace(prog);
	}
}

struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link *link;
	int prog_fd, link_fd;
	__u32 target_fd = 0;

	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
		return libbpf_err_ptr(-EINVAL);

	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->detach = &bpf_link__detach_fd;

	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
				  &link_create_opts);
	if (link_fd < 0) {
		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to iterator: %s\n",
			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(link_fd);
	}
	link->fd = link_fd;
	return link;
}

static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
	*link = bpf_program__attach_iter(prog, NULL);
	return libbpf_get_error(*link);
}
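/* Usage sketch (editor's illustration, not part of libbpf itself): create a
 * link for a BPF iterator program (e.g. SEC("iter/task")), instantiate it
 * with bpf_iter_create() and read its text output; error handling elided.
 *
 *	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
 *	int iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *	close(iter_fd);
 */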
struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
					       const struct bpf_netfilter_opts *opts)
{
	LIBBPF_OPTS(bpf_link_create_opts, lopts);
	struct bpf_link *link;
	int prog_fd, link_fd;

	if (!OPTS_VALID(opts, bpf_netfilter_opts))
		return libbpf_err_ptr(-EINVAL);

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);

	link->detach = &bpf_link__detach_fd;

	lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
	lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
	lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
	lopts.netfilter.flags = OPTS_GET(opts, flags, 0);

	link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
	if (link_fd < 0) {
		char errmsg[STRERR_BUFSIZE];

		link_fd = -errno;
		free(link);
		pr_warn("prog '%s': failed to attach to netfilter: %s\n",
			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
		return libbpf_err_ptr(link_fd);
	}
	link->fd = link_fd;

	return link;
}

struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
{
	struct bpf_link *link = NULL;
	int err;

	if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
		return libbpf_err_ptr(-EOPNOTSUPP);

	err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
	if (err)
		return libbpf_err_ptr(err);

	/* When calling bpf_program__attach() explicitly, auto-attach support
	 * is expected to work, so NULL returned link is considered an error.
	 * This is different for skeleton's attach, see comment in
	 * bpf_object__attach_skeleton().
	 */
	if (!link)
		return libbpf_err_ptr(-EOPNOTSUPP);

	return link;
}

struct bpf_link_struct_ops {
	struct bpf_link link;
	int map_fd;
};

static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
	struct bpf_link_struct_ops *st_link;
	__u32 zero = 0;

	st_link = container_of(link, struct bpf_link_struct_ops, link);

	if (st_link->map_fd < 0)
		/* w/o a real link */
		return bpf_map_delete_elem(link->fd, &zero);

	return close(link->fd);
}

struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
{
	struct bpf_link_struct_ops *link;
	__u32 zero = 0;
	int err, fd;

	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
		return libbpf_err_ptr(-EINVAL);

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);

	/* kern_vdata should be prepared during the loading phase. */
	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
	/* It can be EBUSY if the map has been used to create or
	 * update a link before. We don't allow updating the value of
	 * a struct_ops once it is set. That ensures that the value
	 * never changes. So, it is safe to skip EBUSY.
	 */
	if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
		free(link);
		return libbpf_err_ptr(err);
	}

	link->link.detach = bpf_link__detach_struct_ops;

	if (!(map->def.map_flags & BPF_F_LINK)) {
		/* w/o a real link */
		link->link.fd = map->fd;
		link->map_fd = -1;
		return &link->link;
	}

	fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
	if (fd < 0) {
		free(link);
		return libbpf_err_ptr(fd);
	}

	link->link.fd = fd;
	link->map_fd = map->fd;

	return &link->link;
}

/*
 * Swap the backing struct_ops map of a link with a new struct_ops map.
 */
int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
{
	struct bpf_link_struct_ops *st_ops_link;
	__u32 zero = 0;
	int err;

	if (!bpf_map__is_struct_ops(map) || map->fd < 0)
		return -EINVAL;

	st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
	/* Ensure the type of a link is correct */
	if (st_ops_link->map_fd < 0)
		return -EINVAL;

	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
	/* It can be EBUSY if the map has been used to create or
	 * update a link before. We don't allow updating the value of
	 * a struct_ops once it is set. That ensures that the value
	 * never changes. So, it is safe to skip EBUSY.
	 */
	if (err && err != -EBUSY)
		return err;

	err = bpf_link_update(link->fd, map->fd, NULL);
	if (err < 0)
		return err;

	st_ops_link->map_fd = map->fd;

	return 0;
}
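/* Usage sketch (editor's illustration, not part of libbpf itself): register a
 * struct_ops map (e.g. a tcp_congestion_ops implementation) after the object
 * is loaded; the map name is an assumption of the example.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_cong_ops");
 *	struct bpf_link *link = bpf_map__attach_struct_ops(map);
 *
 * For maps declared with BPF_F_LINK the returned link is a real kernel link,
 * so the implementation can later be swapped atomically:
 *
 *	err = bpf_link__update_map(link, new_map);
 */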
typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
							   void *private_data);

static enum bpf_perf_event_ret
perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
		       void **copy_mem, size_t *copy_size,
		       bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	ring_buffer_write_tail(header, data_tail);
	return libbpf_err(ret);
}

struct perf_buffer;

struct perf_buffer_params {
	struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
	perf_buffer_event_fn event_cb;
	/* sample_cb and lost_cb are higher-level common-case callbacks */
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx;
	int cpu_cnt;
	int *cpus;
	int *map_keys;
};

struct perf_cpu_buf {
	struct perf_buffer *pb;
	void *base; /* mmap()'ed memory */
	void *buf; /* for reconstructing segmented data */
	size_t buf_size;
	int fd;
	int cpu;
	int map_key;
};

struct perf_buffer {
	perf_buffer_event_fn event_cb;
	perf_buffer_sample_fn sample_cb;
	perf_buffer_lost_fn lost_cb;
	void *ctx; /* passed into callbacks */

	size_t page_size;
	size_t mmap_size;
	struct perf_cpu_buf **cpu_bufs;
	struct epoll_event *events;
	int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* epoll instance FD */
	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
};

static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
				      struct perf_cpu_buf *cpu_buf)
{
	if (!cpu_buf)
		return;
	if (cpu_buf->base &&
	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
	if (cpu_buf->fd >= 0) {
		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
		close(cpu_buf->fd);
	}
	free(cpu_buf->buf);
	free(cpu_buf);
}

void perf_buffer__free(struct perf_buffer *pb)
{
	int i;

	if (IS_ERR_OR_NULL(pb))
		return;
	if (pb->cpu_bufs) {
		for (i = 0; i < pb->cpu_cnt; i++) {
			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

			if (!cpu_buf)
				continue;

			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
			perf_buffer__free_cpu_buf(pb, cpu_buf);
		}
		free(pb->cpu_bufs);
	}
	if (pb->epoll_fd >= 0)
		close(pb->epoll_fd);
	free(pb->events);
	free(pb);
}

static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p);

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     perf_buffer_sample_fn sample_cb,
				     perf_buffer_lost_fn lost_cb,
				     void *ctx,
				     const struct perf_buffer_opts *opts)
{
	const size_t attr_sz = sizeof(struct perf_event_attr);
	struct perf_buffer_params p = {};
	struct perf_event_attr attr;
	__u32 sample_period;

	if (!OPTS_VALID(opts, perf_buffer_opts))
		return libbpf_err_ptr(-EINVAL);

	sample_period = OPTS_GET(opts, sample_period, 1);
	if (!sample_period)
		sample_period = 1;

	memset(&attr, 0, attr_sz);
	attr.size = attr_sz;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = sample_period;
	attr.wakeup_events = sample_period;

	p.attr = &attr;
	p.sample_cb = sample_cb;
	p.lost_cb = lost_cb;
	p.ctx = ctx;

	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
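/* Usage sketch (editor's illustration, not part of libbpf itself): consume
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY output with per-sample callbacks; the map
 * handle is an assumption of the example.
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// data/size is the raw record the BPF side emitted via
 *		// bpf_perf_event_output()
 *	}
 *
 *	static void on_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *		fprintf(stderr, "lost %llu events on CPU %d\n", (unsigned long long)cnt, cpu);
 *	}
 *
 *	struct perf_buffer *pb;
 *
 *	// 8 pages per CPU; NULL opts selects defaults
 *	pb = perf_buffer__new(bpf_map__fd(map), 8, on_sample, on_lost, NULL, NULL);
 */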
struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
					 struct perf_event_attr *attr,
					 perf_buffer_event_fn event_cb, void *ctx,
					 const struct perf_buffer_raw_opts *opts)
{
	struct perf_buffer_params p = {};

	if (!attr)
		return libbpf_err_ptr(-EINVAL);

	if (!OPTS_VALID(opts, perf_buffer_raw_opts))
		return libbpf_err_ptr(-EINVAL);

	p.attr = attr;
	p.event_cb = event_cb;
	p.ctx = ctx;
	p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
	p.cpus = OPTS_GET(opts, cpus, NULL);
	p.map_keys = OPTS_GET(opts, map_keys, NULL);

	return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}

static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map;
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
	__u32 map_info_len;
	int err, i, j, n;

	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	/* best-effort sanity checks */
	memset(&map, 0, sizeof(map));
	map_info_len = sizeof(map);
	err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error
		 */
		if (err != -EINVAL) {
			pr_warn("failed to get map info for map FD %d: %s\n",
				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
			return ERR_PTR(err);
		}
		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
			 map_fd);
	} else {
		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
				map.name);
			return ERR_PTR(-EINVAL);
		}
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		if (map.max_entries && map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
	if (err) {
		pr_warn("failed to get online CPU mask: %d\n", err);
		goto error;
	}

	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		/* in case user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
			continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}
struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
				     pb->page_size, &cpu_buf->buf,
				     &cpu_buf->buf_size,
				     perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	if (cnt < 0)
		return -errno;

	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return libbpf_err(err);
		}
	}
	return cnt;
}
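/* Usage sketch (editor's illustration, not part of libbpf itself): a typical
 * event loop around perf_buffer__poll(); the 100ms timeout is arbitrary.
 *
 *	while (!exiting) {
 *		int n = perf_buffer__poll(pb, 100);
 *
 *		if (n < 0 && n != -EINTR) {
 *			fprintf(stderr, "poll failed: %d\n", n);
 *			break;
 *		}
 *	}
 *	perf_buffer__free(pb);
 */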
/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}

/*
 * Return perf_event FD of a ring buffer in *buf_idx* slot of
 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
 * select()/poll()/epoll() Linux syscalls.
 */
int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	return cpu_buf->fd;
}

int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	*buf = cpu_buf->base;
	*buf_size = pb->mmap_size;
	return 0;
}

/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	return perf_buffer__process_records(pb, cpu_buf);
}

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
			return libbpf_err(err);
		}
	}
	return 0;
}

int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_obj_fd = 0, btf_id = 0, err;

	if (!prog || attach_prog_fd < 0)
		return libbpf_err(-EINVAL);

	if (prog->obj->loaded)
		return libbpf_err(-EINVAL);

	if (attach_prog_fd && !attach_func_name) {
		/* remember attach_prog_fd and let bpf_program__load() find
		 * BTF ID during the program load
		 */
		prog->attach_prog_fd = attach_prog_fd;
		return 0;
	}

	if (attach_prog_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
		if (btf_id < 0)
			return libbpf_err(btf_id);
	} else {
		if (!attach_func_name)
			return libbpf_err(-EINVAL);

		/* load btf_vmlinux, if not yet */
		err = bpf_object__load_vmlinux_btf(prog->obj, true);
		if (err)
			return libbpf_err(err);
		err = find_kernel_btf_id(prog->obj, attach_func_name,
					 prog->expected_attach_type,
					 &btf_obj_fd, &btf_id);
		if (err)
			return libbpf_err(err);
	}

	prog->attach_btf_id = btf_id;
	prog->attach_btf_obj_fd = btf_obj_fd;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}
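/* Usage sketch (editor's illustration, not part of libbpf itself): retarget
 * a fentry/fexit program to a different kernel function before loading; the
 * program and function names are assumptions of the example.
 *
 *	struct bpf_program *prog = bpf_object__find_program_by_name(obj, "on_connect");
 *
 *	// must happen after open, but before bpf_object__load()
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v6_connect");
 */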
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}

int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return libbpf_err(err);

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
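/* Usage sketch (editor's illustration, not part of libbpf itself):
 * parse_cpu_mask_str() accepts the kernel's CPU list format, so "0-3,5"
 * yields mask = {1,1,1,1,0,1} and mask_sz = 6. The public wrapper is
 * typically used to size per-CPU value arrays:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(*vals));
 *
 *	// per-CPU map lookups return one value per possible CPU
 *	err = bpf_map_lookup_elem(map_fd, &key, vals);
 */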
static int populate_skeleton_maps(const struct bpf_object *obj,
				  struct bpf_map_skeleton *maps,
				  size_t map_cnt)
{
	int i;

	for (i = 0; i < map_cnt; i++) {
		struct bpf_map **map = maps[i].map;
		const char *name = maps[i].name;
		void **mmaped = maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}
	return 0;
}

static int populate_skeleton_progs(const struct bpf_object *obj,
				   struct bpf_prog_skeleton *progs,
				   size_t prog_cnt)
{
	int i;

	for (i = 0; i < prog_cnt; i++) {
		struct bpf_program **prog = progs[i].prog;
		const char *name = progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}
	return 0;
}

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int err;

	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	err = libbpf_get_error(obj);
	if (err) {
		pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
			s->name, err);
		return libbpf_err(err);
	}

	*s->obj = obj;
	err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
	if (err) {
		pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	return 0;
}
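/* Usage sketch (editor's illustration, not part of libbpf itself): these
 * skeleton entry points are normally driven through the wrappers that
 * "bpftool gen skeleton" emits; for a hypothetical myprog.bpf.o:
 *
 *	struct myprog_bpf *skel = myprog_bpf__open();	// bpf_object__open_skeleton()
 *
 *	err = myprog_bpf__load(skel);			// bpf_object__load_skeleton()
 *	err = myprog_bpf__attach(skel);			// bpf_object__attach_skeleton()
 *	...
 *	myprog_bpf__destroy(skel);			// bpf_object__destroy_skeleton()
 */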
int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
{
	int err, len, var_idx, i;
	const char *var_name;
	const struct bpf_map *map;
	struct btf *btf;
	__u32 map_type_id;
	const struct btf_type *map_type, *var_type;
	const struct bpf_var_skeleton *var_skel;
	struct btf_var_secinfo *var;

	if (!s->obj)
		return libbpf_err(-EINVAL);

	btf = bpf_object__btf(s->obj);
	if (!btf) {
		pr_warn("subskeletons require BTF at runtime (object %s)\n",
			bpf_object__name(s->obj));
		return libbpf_err(-errno);
	}

	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton maps: %d\n", err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton progs: %d\n", err);
		return libbpf_err(err);
	}

	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
		var_skel = &s->vars[var_idx];
		map = *var_skel->map;
		map_type_id = bpf_map__btf_value_type_id(map);
		map_type = btf__type_by_id(btf, map_type_id);

		if (!btf_is_datasec(map_type)) {
			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
				bpf_map__name(map),
				__btf_kind_str(btf_kind(map_type)));
			return libbpf_err(-EINVAL);
		}

		len = btf_vlen(map_type);
		var = btf_var_secinfos(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);
			if (strcmp(var_name, var_skel->name) == 0) {
				*var_skel->addr = map->mmaped + var->offset;
				break;
			}
		}
	}
	return 0;
}

void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
{
	if (!s)
		return;
	free(s->maps);
	free(s->progs);
	free(s->vars);
	free(s);
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to worry
		 * about it from skeleton's clean up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return libbpf_err(err);
		}
	}

	return 0;
}
13193 */ 13194 } 13195 13196 return 0; 13197 } 13198 13199 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) 13200 { 13201 int i; 13202 13203 for (i = 0; i < s->prog_cnt; i++) { 13204 struct bpf_link **link = s->progs[i].link; 13205 13206 bpf_link__destroy(*link); 13207 *link = NULL; 13208 } 13209 } 13210 13211 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) 13212 { 13213 if (!s) 13214 return; 13215 13216 if (s->progs) 13217 bpf_object__detach_skeleton(s); 13218 if (s->obj) 13219 bpf_object__close(*s->obj); 13220 free(s->maps); 13221 free(s->progs); 13222 free(s); 13223 } 13224