// SPDX-License-Identifier: LGPL-2.1

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
#include "nlattr.h"
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <sys/socket.h>
#include <errno.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = create_attr->map_type;
	attr.key_size = create_attr->key_size;
	attr.value_size = create_attr->value_size;
	attr.max_entries = create_attr->max_entries;
	attr.map_flags = create_attr->map_flags;
	memcpy(attr.map_name, create_attr->name,
	       min(name_len, BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = create_attr->numa_node;
	attr.btf_fd = create_attr->btf_fd;
	attr.btf_key_id = create_attr->btf_key_id;
	attr.btf_value_id = create_attr->btf_value_id;
	attr.map_ifindex = create_attr->map_ifindex;

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;
	if (node >= 0) {
		map_attr.numa_node = node;
		map_attr.map_flags |= BPF_F_NUMA_NODE;
	}

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	struct bpf_create_map_attr map_attr = {};

	map_attr.name = name;
	map_attr.map_type = map_type;
	map_attr.map_flags = map_flags;
	map_attr.key_size = key_size;
	map_attr.value_size = value_size;
	map_attr.max_entries = max_entries;

	return bpf_create_map_xattr(&map_attr);
}

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
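
/*
 * Illustrative sketch, not part of the original file: the wrappers above are
 * thin veneers over the BPF_MAP_CREATE command. Assuming the declarations in
 * "bpf.h" and the constants from <linux/bpf.h>, a caller could create a small
 * hash map and exercise the element helpers defined later in this file:
 *
 *	int key = 1, value = 42, out = 0;
 *	int fd;
 *
 *	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
 *			    64, 0);
 *	if (fd < 0)
 *		return fd;
 *	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY) ||
 *	    bpf_map_lookup_elem(fd, &key, &out))
 *		return -1;
 */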

int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
			   char *log_buf, size_t log_buf_sz)
{
	union bpf_attr attr;
	__u32 name_len;
	int fd;

	if (!load_attr)
		return -EINVAL;

	name_len = load_attr->name ? strlen(load_attr->name) : 0;

	bzero(&attr, sizeof(attr));
	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.insn_cnt = (__u32)load_attr->insns_cnt;
	attr.insns = ptr_to_u64(load_attr->insns);
	attr.license = ptr_to_u64(load_attr->license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = load_attr->kern_version;
	attr.prog_ifindex = load_attr->prog_ifindex;
	memcpy(attr.prog_name, load_attr->name,
	       min(name_len, BPF_OBJ_NAME_LEN - 1));

	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	struct bpf_load_program_attr load_attr;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = 0;
	load_attr.name = NULL;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;

	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
}

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
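
/*
 * Illustrative sketch, not part of the original file: loading a trivial
 * "return 0" socket filter through bpf_load_program(). The instruction
 * encoding uses only constants from <linux/bpf.h>; kern_version may be 0
 * for anything other than kprobe programs, and the log buffer is only
 * filled on the retry-with-log path above.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	char log[4096];
 *	int fd;
 *
 *	fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
 *			      sizeof(insns) / sizeof(insns[0]), "GPL",
 *			      0, log, sizeof(log));
 *	if (fd < 0)
 *		fprintf(stderr, "verifier said:\n%s\n", log);
 */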

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	bzero(&attr, sizeof(attr));
	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = query_flags;
	attr.query.prog_cnt = *prog_cnt;
	attr.query.prog_ids = ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}
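
/*
 * Illustrative sketch, not part of the original file: attaching a loaded
 * cgroup program and then querying what is attached. The cgroup v2 mount
 * point, the attach type and the use of open(2) (from <fcntl.h>) are only
 * examples; prog_fd is assumed to come from bpf_load_program() above.
 *
 *	__u32 prog_ids[16], prog_cnt = 16, attach_flags;
 *	int cg_fd;
 *
 *	cg_fd = open("/sys/fs/cgroup/unified", O_RDONLY);
 *	if (cg_fd < 0 ||
 *	    bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, 0))
 *		return -1;
 *	if (!bpf_prog_query(cg_fd, BPF_CGROUP_INET_INGRESS, 0, &attach_flags,
 *			    prog_ids, &prog_cnt))
 *		printf("%u program(s) attached\n", prog_cnt);
 */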

int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	bzero(&attr, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
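
/*
 * Illustrative sketch, not part of the original file: despite the prog_fd
 * parameter name, bpf_obj_get_info_by_fd() works for map fds as well as
 * program fds; the kernel fills in at most *info_len bytes and writes back
 * how much it actually produced. For a program fd:
 *
 *	struct bpf_prog_info info = {};
 *	__u32 info_len = sizeof(info);
 *
 *	if (!bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
 *		printf("id %u, name %s\n", info.id, info.name);
 */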

int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	struct sockaddr_nl sa;
	int sock, seq = 0, len, ret = -1;
	char buf[4096];
	struct nlattr *nla, *nla_xdp;
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifinfo;
		char attrbuf[64];
	} req;
	struct nlmsghdr *nh;
	struct nlmsgerr *err;
	socklen_t addrlen;
	int one = 1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (sock < 0) {
		return -errno;
	}

	if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
		       &one, sizeof(one)) < 0) {
		fprintf(stderr, "Netlink error reporting not supported\n");
	}

	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		ret = -errno;
		goto cleanup;
	}

	addrlen = sizeof(sa);
	if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
		ret = -errno;
		goto cleanup;
	}

	if (addrlen != sizeof(sa)) {
		ret = -LIBBPF_ERRNO__INTERNAL;
		goto cleanup;
	}

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_pid = 0;
	req.nh.nlmsg_seq = ++seq;
	req.ifinfo.ifi_family = AF_UNSPEC;
	req.ifinfo.ifi_index = ifindex;

	/* start nested attribute for XDP */
	nla = (struct nlattr *)(((char *)&req)
				+ NLMSG_ALIGN(req.nh.nlmsg_len));
	nla->nla_type = NLA_F_NESTED | IFLA_XDP;
	nla->nla_len = NLA_HDRLEN;

	/* add XDP fd */
	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
	nla_xdp->nla_type = IFLA_XDP_FD;
	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
	nla->nla_len += nla_xdp->nla_len;

	/* if user passed in any flags, add those too */
	if (flags) {
		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
		nla_xdp->nla_type = IFLA_XDP_FLAGS;
		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
		nla->nla_len += nla_xdp->nla_len;
	}

	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
		ret = -errno;
		goto cleanup;
	}

	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0) {
		ret = -errno;
		goto cleanup;
	}

	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_pid != sa.nl_pid) {
			ret = -LIBBPF_ERRNO__WRNGPID;
			goto cleanup;
		}
		if (nh->nlmsg_seq != seq) {
			ret = -LIBBPF_ERRNO__INVSEQ;
			goto cleanup;
		}
		switch (nh->nlmsg_type) {
		case NLMSG_ERROR:
			err = (struct nlmsgerr *)NLMSG_DATA(nh);
			if (!err->error)
				continue;
			ret = err->error;
			nla_dump_errormsg(nh);
			goto cleanup;
		case NLMSG_DONE:
			break;
		default:
			break;
		}
	}

	ret = 0;

cleanup:
	close(sock);
	return ret;
}

int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

retry:
	if (do_log && log_buf && log_buf_size) {
		attr.btf_log_level = 1;
		attr.btf_log_size = log_buf_size;
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
	if (fd == -1 && !do_log && log_buf && log_buf_size) {
		do_log = true;
		goto retry;
	}

	return fd;
}
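
/*
 * Illustrative sketch, not part of the original file: attaching an XDP
 * program to an interface with bpf_set_link_xdp_fd(). The interface name,
 * XDP_FLAGS_SKB_MODE (from <linux/if_link.h>) and if_nametoindex() (from
 * <net/if.h>) are only examples; passing an fd of -1 detaches again.
 *
 *	int ifindex = if_nametoindex("eth0");
 *
 *	if (!ifindex ||
 *	    bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_SKB_MODE))
 *		return -1;
 *	...
 *	bpf_set_link_xdp_fd(ifindex, -1, 0);
 */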