// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Facebook */

#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#include "relo_core.h"

static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}

static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}

static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}

static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

#undef pr_warn
#undef pr_info
#undef pr_debug
#define pr_warn(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...)	bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
#else
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <linux/err.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#endif

static bool is_flex_arr(const struct btf *btf,
			const struct bpf_core_accessor *acc,
			const struct btf_array *arr)
{
	const struct btf_type *t;

	/* not a flexible array, if not inside a struct or has non-zero size */
	if (!acc->name || arr->nelems > 0)
		return false;

	/* has to be the last member of enclosing struct */
	t = btf_type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
}

static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_CORE_FIELD_EXISTS: return "field_exists";
	case BPF_CORE_FIELD_SIGNED: return "signed";
	case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
	case BPF_CORE_TYPE_EXISTS: return "type_exists";
	case BPF_CORE_TYPE_SIZE: return "type_size";
	case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
	default: return "unknown";
	}
}

static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_TYPE_ID_LOCAL:
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return true;
	default:
		return false;
	}
}

/*
 * Turn bpf_core_relo into a low- and high-level spec representation,
 * validating correctness along the way, as well as calculating resulting
 * field bit offset, specified by accessor string. Low-level spec captures
 * every single level of nestedness, including traversing anonymous
 * struct/union members. High-level one only captures semantically meaningful
 * "turning points": named fields and array indices.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int *x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of access string (it's
 * just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
 * spec and raw_spec are kept empty.
 *
 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
 * string to specify the enumerator's value index that needs to be relocated.
 */
int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			const struct bpf_core_relo *relo,
			struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name, *spec_str;
	__u32 id, name_off;
	__s64 sz;

	spec_str = btf__name_by_offset(btf, relo->access_str_off);
	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = relo->type_id;
	spec->relo_kind = relo->kind;

	/* type-based relocations don't have a field access string */
	if (core_relo_is_type_based(relo->kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, relo->type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo->kind)) {
		if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		/* record enumerator name in a first accessor */
		name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off
					  : btf_enum64(t)[access_idx].name_off;
		acc->name = btf__name_by_offset(btf, name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo->kind))
		return -EINVAL;

	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}

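/*
 * Worked example (illustration only; it assumes 4-byte ints and no extra
 * padding): for the `struct sample` case described above, parsing access
 * string "0:1:2:3" against root type `struct sample` yields
 *
 *	raw_spec   = [0, 1, 2, 3], raw_len = 4
 *	spec       = [ s[0], field 'a', array index 3 ], len = 3
 *	bit_offset = 0 + 32 + 64 + 3 * 4 * 8 = 192
 *
 * i.e. &s->a[3] sits at byte offset 24 from the start of the struct.
 * bpf_core_format_spec(), defined further down, would render such a spec
 * roughly as
 *
 *	<byte_off> [42] struct sample.a[3] (0:1:2:3 @ offset 24)
 *
 * where 42 stands in for whatever BTF type ID `struct sample` happens to
 * have.
 */
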
/* Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of enums should be anonymous;
 *   - for INT, size and signedness are ignored;
 *   - any two FLOATs are always compatible;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (!btf_kind_core_compat(local_type, targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM64:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both w/ same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		return 0;
	}
}

/*
 * Given single high-level named field accessor in local type, find
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
 * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If field with
 * desired name is found, check compatibility between local and target types,
 * before returning result.
 *
 * 1 is returned, if field is found.
 * 0 is returned if no compatible field is found.
 * <0 is returned on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}

/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;
	__u32 name_off;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		return bpf_core_types_are_compat(local_spec->btf,
						 local_spec->root_type_id,
						 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_any_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		for (i = 0; i < btf_vlen(targ_type); i++) {
			if (btf_is_enum(targ_type))
				name_off = btf_enum(targ_type)[i].name_off;
			else
				name_off = btf_enum64(targ_type)[i].name_off;

			targ_name = btf__name_by_offset(targ_spec->btf, name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as array element
			 * type (because it's the original struct), for others
			 * we should find array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}

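/*
 * Illustration only (hypothetical types): if the local BTF has
 *
 *	struct sample { int x; int a; };	// s.a -> access string "0:1"
 *
 * and the target BTF wraps the same field in an anonymous union
 *
 *	struct sample { int x; union { int a; int b; }; };
 *
 * then matching the local spec against the target descends into the
 * anonymous union via bpf_core_match_member() and produces a target spec
 * with raw_spec = [0, 1, 0], the same single high-level accessor
 * (field 'a'), and bit_offset = 32 (byte offset 4).
 */
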
static int bpf_core_calc_field_relo(const char *prog_name,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u64 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog_name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog_name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_CORE_FIELD_SIGNED:
		*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

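/*
 * Worked example (illustration only, little-endian target assumed): for a
 * target layout such as
 *
 *	struct s {
 *		unsigned int pad : 12;
 *		unsigned int x   : 5;
 *	};
 *
 * accessing 'x' gives bit_off = 12 and bit_sz = 5. The underlying int is
 * 4 bytes and bits 12..16 fit into the first 4-byte load, so byte_off = 0
 * and byte_sz = 4. The field relocations then resolve to
 *
 *	BYTE_OFFSET = 0, BYTE_SIZE = 4
 *	LSHIFT_U64  = 64 - (12 + 5 - 0) = 47
 *	RSHIFT_U64  = 64 - 5 = 59
 *
 * so a "(val << 47) >> 59" sequence extracts exactly the five bits of 'x'
 * from the loaded word.
 */
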
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u64 *val, bool *validate)
{
	__s64 sz;

	/* by default, always check expected value in bpf_insn */
	if (validate)
		*validate = true;

	/* type-based relos return zero when target type is not found */
	if (!spec) {
		*val = 0;
		return 0;
	}

	switch (relo->kind) {
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID, embedded in bpf_insn, might change during linking,
		 * so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_CORE_TYPE_EXISTS:
		*val = 1;
		break;
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_CORE_TYPE_ID_LOCAL:
		/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
				      const struct bpf_core_spec *spec,
				      __u64 *val)
{
	const struct btf_type *t;

	switch (relo->kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
		*val = spec ? 1 : 0;
		break;
	case BPF_CORE_ENUMVAL_VALUE:
		if (!spec)
			return -EUCLEAN; /* request instruction poisoning */
		t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
		if (btf_is_enum(t))
			*val = btf_enum(t)[spec->spec[0].idx].val;
		else
			*val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

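/*
 * Illustration only (hypothetical enums): with a local definition
 *
 *	enum pkt_kind { PKT_HOST, PKT_BCAST };			(PKT_BCAST == 1)
 *
 * and a target kernel that defines
 *
 *	enum pkt_kind { PKT_OTHER, PKT_HOST, PKT_BCAST };	(PKT_BCAST == 2)
 *
 * ENUMVAL_EXISTS for PKT_BCAST resolves to 1, while ENUMVAL_VALUE resolves
 * to orig_val = 1 and new_val = 2, so the instruction that used the local
 * enumerator gets its immediate rewritten from 1 to 2. If the enumerator is
 * missing from the target altogether, ENUMVAL_VALUE requests instruction
 * poisoning via -EUCLEAN instead.
 */
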
/* Calculate original and target relocation values, given local and target
 * specs and relocation kind. These values are calculated for each candidate.
 * If there are multiple candidates, resulting values should all be consistent
 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 * If instruction has to be poisoned, res->poison will be set to true.
 */
static int bpf_core_calc_relo(const char *prog_name,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;
		/* Validate if it's safe to adjust load/store memory size.
		 * Adjustments are performed only if original and new memory
		 * sizes differ.
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
			 *   - reading a 32-bit kernel pointer, while on the BPF
			 *     side pointers are always 64-bit; in this case
			 *     it's safe to "downsize" instruction size due to
			 *     pointer being treated as unsigned integer with
			 *     zero-extended upper 32-bits;
			 *   - reading unsigned integers, again because
			 *     zero-extension preserves the value correctly.
			 *
			 * In all other cases it's incorrect to attempt to
			 * load/store field because read value will be
			 * incorrect, so we poison relocated instruction.
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment, but this will
			 * only be checked for LDX/STX/ST insns
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* EUCLEAN is used to signal instruction poisoning request */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* EOPNOTSUPP means unknown/unsupported relocation */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}

/*
 * Turn instruction for which CO-RE relocation failed into invalid one with
 * distinct signature.
 */
static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
				 int insn_idx, struct bpf_insn *insn)
{
	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
		 prog_name, relo_idx, insn_idx);
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not dead code),
	 * verifier will complain with the following message:
	 * invalid func unknown#195896080
	 */
	insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
}

static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_DW: return 8;
	case BPF_W: return 4;
	case BPF_H: return 2;
	case BPF_B: return 1;
	default: return -1;
	}
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

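/*
 * End-to-end illustration (hypothetical field, values chosen for the
 * example) of what the patching logic below does with a field relocation
 * result: if the local BTF records an unsigned int field at byte offset 16
 * (orig_val = 16, orig_sz = 4) and the target kernel has the same field as
 * an unsigned long at byte offset 24 (new_val = 24, new_sz = 8), then
 *
 *	r1 = *(u32 *)(r2 + 16)
 *
 * is rewritten into
 *
 *	r1 = *(u64 *)(r2 + 24)
 *
 * i.e. both the offset and the BPF_SIZE() bits of the LDX opcode are
 * patched. Had the field been a signed integer, res->fail_memsz_adjust
 * would have been set above and the instruction poisoned instead.
 */
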
/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
 * For existence relocations target spec will be NULL if field/type is not found.
 * Expected insn->imm value is determined using relocation kind and local
 * spec, and is checked before patching instruction. If actual insn->imm value
 * is wrong, bail out with error.
 *
 * Currently supported classes of BPF instruction are:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 * 3. rX = <imm64> (load with 64-bit immediate value);
 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
 */
int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
			int insn_idx, const struct bpf_core_relo *relo,
			int relo_idx, const struct bpf_core_relo_res *res)
{
	__u64 orig_val, new_val;
	__u8 class;

	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison second part of ldimm64 to avoid confusing error from
		 * verifier about "unknown opcode 00"
		 */
		if (is_ldimm64_insn(insn))
			bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n",
				prog_name, relo_idx,
				insn_idx, insn->imm, (unsigned long long)orig_val,
				(unsigned long long)new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)orig_val, (unsigned long long)new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n",
				prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val,
				(unsigned long long)new_val);
			return -EINVAL;
		}
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n",
				prog_name, relo_idx, insn_idx, (unsigned long long)new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog_name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n",
			 prog_name, relo_idx, insn_idx, (unsigned long long)orig_val,
			 (unsigned long long)new_val);

		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog_name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		if (!is_ldimm64_insn(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog_name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n",
				prog_name, relo_idx,
				insn_idx, (unsigned long long)imm,
				(unsigned long long)orig_val, (unsigned long long)new_val);
			return -EINVAL;
		}

		insn[0].imm = new_val;
		insn[1].imm = new_val >> 32;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)imm, (unsigned long long)new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog_name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}

/* Output spec definition in the format:
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 */
int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const char *s;
	__u32 type_id;
	int i, len = 0;

#define append_buf(fmt, args...)				\
	({							\
		int r;						\
		r = snprintf(buf, buf_sz, fmt, ##args);		\
		len += r;					\
		if (r >= buf_sz)				\
			r = buf_sz;				\
		buf += r;					\
		buf_sz -= r;					\
	})

	type_id = spec->root_type_id;
	t = btf_type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	append_buf("<%s> [%u] %s %s",
		   core_relo_kind_str(spec->relo_kind),
		   type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	if (core_relo_is_type_based(spec->relo_kind))
		return len;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		if (btf_is_enum(t)) {
			const struct btf_enum *e;
			const char *fmt_str;

			e = btf_enum(t) + spec->raw_spec[0];
			s = btf__name_by_offset(spec->btf, e->name_off);
			fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
			append_buf(fmt_str, s, e->val);
		} else {
			const struct btf_enum64 *e;
			const char *fmt_str;

			e = btf_enum64(t) + spec->raw_spec[0];
			s = btf__name_by_offset(spec->btf, e->name_off);
			fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
			append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e));
		}
		return len;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				append_buf(".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				append_buf("[%u]", spec->spec[i].idx);
		}

		append_buf(" (");
		for (i = 0; i < spec->raw_len; i++)
			append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
		else
			append_buf(" @ offset %u)", spec->bit_offset / 8);
		return len;
	}

	return len;
#undef append_buf
}

/*
 * Calculate CO-RE relocation target result.
 *
 * The outline and important points of the algorithm:
 * 1. For given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, to allow specifying and
 *    supporting incompatible variations of the same kernel struct, which
 *    might differ between different kernel versions and/or build
 *    configurations.
 *
 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 *    converter, when deduplicated BTF of a kernel still contains more than
 *    one distinct type with the same name. In that case, ___2, ___3, etc
 *    are appended starting from the second name conflict. But struct flavors
 *    are also useful to be defined "locally", in a BPF program, to extract
 *    the same data from incompatible changes between different kernel
 *    versions/configurations. For instance, to handle field renames between
 *    kernel versions, one can use two flavors of the struct with the same
 *    common name and use conditional relocations to extract that field,
 *    depending on target kernel version.
 * 2. For each candidate type, try to match local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match,
 *    as well as all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    imperfection of BTF deduplication, which can cause slight duplication of
 *    the same BTF type, if some directly or indirectly referenced (by
 *    pointer) type gets resolved to different actual types in different
 *    object files. If such a situation occurs, deduplicated BTF will end up
 *    with two (or more) structurally identical types, which differ only in
 *    types they refer to through pointer. This should be OK in most cases and
 *    is not an error.
 * 4. Candidate types search is performed by linearly scanning through all
 *    types in target BTF. It is anticipated that this is overall more
 *    efficient memory-wise and not significantly worse (if not better)
 *    CPU-wise compared to prebuilding a map from all local type names to
 *    a list of candidate type names. It's also sped up by caching resolved
 *    list of matching candidates per each local "root" type ID that has at
 *    least one bpf_core_relo associated with it. This list is shared
 *    between multiple relocations for the same type ID and is updated as some
 *    of the candidates are pruned due to structural incompatibility.
 */
int bpf_core_calc_relo_insn(const char *prog_name,
			    const struct bpf_core_relo *relo,
			    int relo_idx,
			    const struct btf *local_btf,
			    struct bpf_core_cand_list *cands,
			    struct bpf_core_spec *specs_scratch,
			    struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_spec *local_spec = &specs_scratch[0];
	struct bpf_core_spec *cand_spec = &specs_scratch[1];
	struct bpf_core_spec *targ_spec = &specs_scratch[2];
	struct bpf_core_relo_res cand_res;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id;
	char spec_buf[256];
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
	if (err) {
		const char *spec_str;

		spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, btf_kind_str(local_type),
			str_is_empty(local_name) ? "<anon>" : local_name,
			spec_str ?: "<?>", err);
		return -EINVAL;
	}

	bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
	pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);

	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
	if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
		/* bpf_insn's imm value could get out of sync during linking */
		memset(targ_res, 0, sizeof(*targ_res));
		targ_res->validate = false;
		targ_res->poison = false;
		targ_res->orig_val = local_spec->root_type_id;
		targ_res->new_val = local_spec->root_type_id;
		return 0;
	}

	/* libbpf doesn't support candidate search for anonymous types */
	if (str_is_empty(local_name)) {
		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
		return -EOPNOTSUPP;
	}

	for (i = 0, j = 0; i < cands->len; i++) {
		err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
					  cands->cands[i].id, cand_spec);
		if (err < 0) {
			bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
			pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
				prog_name, relo_idx, i, spec_buf, err);
			return err;
		}

		bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
		pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
			 relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);

		if (err == 0)
			continue;

		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
		if (err)
			return err;

		if (j == 0) {
			*targ_res = cand_res;
			*targ_spec = *cand_spec;
		} else if (cand_spec->bit_offset != targ_spec->bit_offset) {
			/* if there are many field relo candidates, they
			 * should all resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec->bit_offset,
				targ_spec->bit_offset);
			return -EINVAL;
		} else if (cand_res.poison != targ_res->poison ||
			   cand_res.new_val != targ_res->new_val) {
			/* all candidates should result in the same relocation
			 * decision and value, otherwise it's dangerous to
			 * proceed due to ambiguity
			 */
			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n",
				prog_name, relo_idx,
				cand_res.poison ? "failure" : "success",
				(unsigned long long)cand_res.new_val,
				targ_res->poison ? "failure" : "success",
				(unsigned long long)targ_res->new_val);
			return -EINVAL;
		}

		cands->cands[j++] = cands->cands[i];
	}

	/*
	 * For BPF_CORE_FIELD_EXISTS relo or when the BPF program has field
	 * existence checks or kernel version/config checks, it's expected
	 * that we might not find any candidates. In this case, if field
	 * wasn't found in any candidate, the list of candidates shouldn't
	 * change at all, we'll just handle relocating appropriately,
	 * depending on relo's kind.
	 */
	if (j > 0)
		cands->len = j;

	/*
	 * If no candidates were found, it might be both a programmer error,
	 * as well as expected case, depending on whether instruction w/
	 * relocation is guarded in some way that makes it unreachable (dead
	 * code) if relocation can't be resolved. This is handled in
	 * bpf_core_patch_insn() uniformly by replacing that instruction with
	 * BPF helper call insn (using invalid helper ID). If that instruction
	 * is indeed unreachable, then it will be ignored and eliminated by
	 * verifier. If it was an error, then verifier will complain and point
	 * to a specific instruction number in its log.
	 */
	if (j == 0) {
		pr_debug("prog '%s': relo #%d: no matching targets found\n",
			 prog_name, relo_idx);

		/* calculate single target relo result explicitly */
		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
		if (err)
			return err;
	}

	return 0;
}
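
/*
 * Typical driver sequence (sketch only; the actual callers live in libbpf's
 * object-loading code and in the kernel's BTF/verifier code): for each
 * recorded relocation, the caller gathers candidate types, computes the
 * relocation result and then patches the instruction, roughly:
 *
 *	struct bpf_core_spec specs[3];
 *	struct bpf_core_relo_res res;
 *	int err;
 *
 *	err = bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf,
 *				      cands, specs, &res);
 *	if (!err)
 *		err = bpf_core_patch_insn(prog_name, insn, insn_idx,
 *					  relo, relo_idx, &res);
 */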