/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps.  Hence, it basically focuses
 * on the C programming language, which modern BPF primarily uses.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data.  For example,
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type.  Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id.  The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section.  The first one has type_id 1.  The second
 * one has type_id 2...etc.  Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *".  A btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *".  This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'.  Some btf_type may not
 * have a name.
 */
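
/* For illustration only (the type_ids and string offsets below are made
 * up, not taken from any real BTF blob), the C declaration
 *
 *	const int *p;
 *
 * could be encoded with three btf_type objects plus the string section:
 *
 *	[1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *	[2] CONST (anon) type_id=1
 *	[3] PTR (anon) type_id=2
 *
 *	strings: "\0int\0"	("int" is at offset 1, offset 0 is '\0')
 *
 * i.e. PTR refers to CONST, CONST refers to INT, and only INT needs a
 * name, so only one string besides the leading '\0' is required.
 */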

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referred-to type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type.  A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
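
/* A quick worked example of the helpers above: for a bit field that
 * starts at bit 12 and is 3 bits wide,
 *	BITS_ROUNDDOWN_BYTES(12) == 1	(it begins inside byte 1)
 *	BITS_PER_BYTE_MASKED(12) == 4	(4 bits into that byte)
 *	BITS_ROUNDUP_BYTES(12 + 3) == 2	(2 bytes are needed to cover it)
 */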

#define BTF_INFO_MASK 0x0f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs, each with 16 members, and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member(i, struct_type, member)			\
	for (i = 0, member = btf_type_member(struct_type);	\
	     i < btf_type_vlen(struct_type);			\
	     i++, member++)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

static DEFINE_IDR(btf_idr);
static DEFINE_SPINLOCK(btf_idr_lock);

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types;
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN] = "UNKNOWN",
	[BTF_KIND_INT] = "INT",
	[BTF_KIND_PTR] = "PTR",
	[BTF_KIND_ARRAY] = "ARRAY",
	[BTF_KIND_STRUCT] = "STRUCT",
	[BTF_KIND_UNION] = "UNION",
	[BTF_KIND_ENUM] = "ENUM",
	[BTF_KIND_FWD] = "FWD",
	[BTF_KIND_TYPEDEF] = "TYPEDEF",
	[BTF_KIND_VOLATILE] = "VOLATILE",
	[BTF_KIND_CONST] = "CONST",
	[BTF_KIND_RESTRICT] = "RESTRICT",
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * A type (t) that refers to another
	 * type through t->type AND its size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return true;
	}

	return false;
}

static bool btf_type_is_void(const struct btf_type *t)
{
	/* void => no type and size info.
	 * Hence, FWD is also treated as void.
	 */
	return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return !t || btf_type_is_void(t);
}

/* union is only a special case of struct:
 * all its offsetof(member) == 0
 */
static bool btf_type_is_struct(const struct btf_type *t)
{
	u8 kind = BTF_INFO_KIND(t->info);

	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_ptr(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
}

static bool btf_type_is_int(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its members refer to
 * another type (through member->type).
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  An array can be thought of as a
 * special case of a struct where the same member type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u16 btf_type_vlen(const struct btf_type *t)
{
	return BTF_INFO_VLEN(t->info);
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_member *btf_type_member(const struct btf_type *t)
{
	return (const struct btf_member *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}
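
/* The accessors above all rely on the same layout convention: the
 * kind-specific data immediately follows the fixed-size struct btf_type,
 * so "(t + 1)" is the start of that trailing data.  For illustration
 * (the member values are made up), a BTF_KIND_ARRAY entry is laid out as:
 *
 *	struct btf_type  { name_off, info (kind=ARRAY, vlen=0), size=0 }
 *	struct btf_array { type=<elem type_id>, index_type=<index type_id>,
 *			   nelems=<number of elements> }
 *
 * and btf_type_array(t) simply returns a pointer to that second part.
 */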

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	return BTF_STR_OFFSET_VALID(offset) &&
		offset < btf->hdr.str_len;
}

/* Only C-style identifiers are permitted.  This can be relaxed if
 * necessary.
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	/* offset must be valid */
	const char *src = &btf->strings[offset];
	const char *src_limit;

	if (!isalpha(*src) && *src != '_')
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!isalnum(*src) && *src != '_')
			return false;
		src++;
	}

	return !*src;
}

static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	if (!offset)
		return "(anon)";
	else if (offset < btf->hdr.str_len)
		return &btf->strings[offset];
	else
		return "(invalid-name-offset)";
}

static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
		return false;
	}

	return true;
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* The CHECK_META phase already did a btf dump.
	 *
	 * If the member is logged again, we must have hit an error
	 * in parsing this member.  It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
			   btf_name_by_offset(btf, member->name_off),
			   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	/* < 2 because +1 for btf_void which is always in btf->types[0].
	 * btf_void is not accounted in btf->nr_types because btf_void
	 * does not come from the BTF file.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map.  Some of the map_delete_elem()
	 * implementations may have irqs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	/* +1 for btf_void */
	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct or array is a sink for ptr */
		return !btf_type_is_modifier(next_type) &&
			!btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void or ptr is a sink for struct and array */
		return !btf_type_is_modifier(next_type) &&
			!btf_type_is_array(next_type) &&
			!btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (env->visit_states[type_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}
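
/* A hand-worked example of how the resolve stack behaves (editorial
 * illustration; the type_ids below are made up):
 *
 *	[1] TYPEDEF intp type_id=2
 *	[2] PTR (anon) type_id=3
 *	[3] INT int ...
 *
 * Resolving [1] pushes [1] (marked VISITED, mode still RESOLVE_TBD),
 * then pushes [2] because a PTR is not a sink for RESOLVE_TBD.  Pushing
 * [2] switches the mode to RESOLVE_PTR, and since INT is a sink for
 * RESOLVE_PTR, [2] pops right away as RESOLVED (resolved_id=3).  Back at
 * [1], [2] is now resolved, so [1] pops as RESOLVED with resolved_id=2
 * and size sizeof(void *).  A reference cycle would instead find a
 * VISITED (but not yet RESOLVED) entry in env_stack_push() and fail with
 * -EEXIST ("Loop detected").
 */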

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf->resolved_ids[*type_id];
	return btf_type_by_id(btf, *type_id);
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_is_void_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf->resolved_sizes[size_type_id];
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
			return NULL;

		size = btf->resolved_sizes[size_type_id];
		size_type_id = btf->resolved_ids[size_type_id];
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_is_void(size_type))
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
	return -EINVAL;
}

static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offsets,
			    struct seq_file *m)
{
	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}

static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	if (nr_copy_bits > BITS_PER_U64) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 64");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
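
/* The kind-specific u32 that follows a BTF_KIND_INT (returned by
 * btf_type_int()) packs three fields, decoded by the BTF_INT_*()
 * macros from uapi/linux/btf.h: the encoding in bits 24-27, the bit
 * offset in bits 16-23 and the number of bits in bits 0-7.  As an
 * illustrative (made-up) value, int_data == 0x01000020 would mean
 * encoding=SIGNED, bits_offset=0, nr_bits=32, i.e. a plain signed
 * 32-bit int, which btf_type_int_is_regular() accepts.
 */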
static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	int_data = btf_type_int(t);
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	if (nr_bits > BITS_PER_U64) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U64);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	/*
	 * Only one of the encoding bits is allowed and it
	 * should be sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found
	 * to be insufficient.
	 */
	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{
	int int_data = btf_type_int(t);

	btf_verifier_log(env,
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
			 t->size, BTF_INT_OFFSET(int_data),
			 BTF_INT_BITS(int_data),
			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
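
/* Worked example for btf_int_bits_seq_show() below (editorial note,
 * little-endian case): for a bit field with nr_bits=3 at bits_offset=4
 * and data[0] == 0x70 (binary 0111 0000), nr_copy_bits is 7, so one
 * byte is copied into print_num.  The left shift by (64 - 7) pushes the
 * three interesting bits to the top of the u64 and the right shift by
 * (64 - 3) brings them back down, so "0x7" is printed.
 */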
static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 64 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	print_num = 0;
	memcpy(&print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U64 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U64 - nr_bits;

	print_num <<= left_shift_bits;
	print_num >>= right_shift_bits;

	seq_printf(m, "0x%llx", print_num);
}

static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}

static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};

static int btf_modifier_check_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_member(env, struct_type,
							 &resolved_member,
							 resolved_type);
}

static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 struct_size, struct_bits_off, bytes_offset;

	struct_size = struct_type->size;
	struct_bits_off = member->offset;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (struct_size - bytes_offset < sizeof(void *)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (!BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	/* typedef type must have a valid name, and the other ref types,
	 * volatile, const and restrict, should have a null name.
	 */
	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
		if (!t->name_off ||
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	} else {
		if (t->name_off) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;
	u32 next_type_size = 0;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* "typedef void new_void", "const void"...etc */
	if (btf_type_is_void(next_type))
		goto resolved;

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes so that they can
	 * save us some type-following when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
	    !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

resolved:
	env_stack_pop_resolved(env, next_type_id, next_type_size);

	return 0;
}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;
	u32 next_type_size = 0;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* "void *" */
	if (btf_type_is_void(next_type))
		goto resolved;

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not refer back to
	 * the current ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
	    !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

resolved:
	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static void btf_modifier_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  u32 type_id, void *data,
				  u8 bits_offset, struct seq_file *m)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}

static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	/* It is a hashed value */
	seq_printf(m, "%p", *(void **)data);
}

static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "type_id=%u", t->type);
}

static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};

static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};

static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->type) {
		btf_verifier_log_type(env, t, "type != 0");
		return -EINVAL;
	}

	/* fwd type must have a valid name */
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};

static int btf_array_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;
	u32 array_type_id, array_size;
	struct btf *btf = env->btf;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	array_type_id = member->type;
	btf_type_id_size(btf, &array_type_id, &array_size);
	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < array_size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	const struct btf_array *array = btf_type_array(t);
	u32 meta_needed = sizeof(*array);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* array type should not have a name */
	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->size) {
		btf_verifier_log_type(env, t, "size != 0");
		return -EINVAL;
	}

	/* Array elem type and index type cannot be in type void,
	 * so !array->type and !array->index_type are not allowed.
	 */
	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
		btf_verifier_log_type(env, t, "Invalid elem");
		return -EINVAL;
	}

	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
		btf_verifier_log_type(env, t, "Invalid index");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{
	const struct btf_array *array = btf_type_array(v->t);
	const struct btf_type *elem_type, *index_type;
	u32 elem_type_id, index_type_id;
	struct btf *btf = env->btf;
	u32 elem_size;

	/* Check array->index_type */
	index_type_id = array->index_type;
	index_type = btf_type_by_id(btf, index_type_id);
	if (btf_type_is_void_or_null(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Check array->type */
	elem_type_id = array->type;
	elem_type = btf_type_by_id(btf, elem_type_id);
	if (btf_type_is_void_or_null(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
		return -EINVAL;
	}

	if (array->nelems && elem_size > U32_MAX / array->nelems) {
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);

	return 0;
}
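
/* Note on multi-dimensional arrays (editorial illustration; the type_ids
 * are made up): BTF only has one-dimensional BTF_KIND_ARRAY, so
 * something like "int a[2][3]" is expected to be encoded as an array
 * of arrays, e.g.:
 *
 *	[1] INT int size=4 ...
 *	[2] ARRAY (anon) type_id=3 index_type_id=1 nr_elems=2
 *	[3] ARRAY (anon) type_id=1 index_type_id=1 nr_elems=3
 *
 * btf_array_resolve() then computes resolved_sizes[3] = 3 * 4 = 12
 * first, and resolved_sizes[2] = 2 * 12 = 24 on the way back up.
 */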

static void btf_array_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_array *array = btf_type_array(t);

	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
			 array->type, array->index_type, array->nelems);
}

static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
			       u32 type_id, void *data, u8 bits_offset,
			       struct seq_file *m)
{
	const struct btf_array *array = btf_type_array(t);
	const struct btf_kind_operations *elem_ops;
	const struct btf_type *elem_type;
	u32 i, elem_size, elem_type_id;

	elem_type_id = array->type;
	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	elem_ops = btf_type_ops(elem_type);
	seq_puts(m, "[");
	for (i = 0; i < array->nelems; i++) {
		if (i)
			seq_puts(m, ",");

		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
				   bits_offset, m);
		data += elem_size;
	}
	seq_puts(m, "]");
}

static struct btf_kind_operations array_ops = {
	.check_meta = btf_array_check_meta,
	.resolve = btf_array_resolve,
	.check_member = btf_array_check_member,
	.log_details = btf_array_log,
	.seq_show = btf_array_seq_show,
};

static int btf_struct_check_member(struct btf_verifier_env *env,
				   const struct btf_type *struct_type,
				   const struct btf_member *member,
				   const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < member_type->size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_struct_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{
	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
	const struct btf_member *member;
	u32 meta_needed, last_offset;
	struct btf *btf = env->btf;
	u32 struct_size = t->size;
	u16 i;

	meta_needed = btf_type_vlen(t) * sizeof(*member);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* struct type either has no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	last_offset = 0;
	for_each_member(i, t, member) {
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
			return -EINVAL;
		}

		/* struct member either has no name or a valid one */
		if (member->name_off &&
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
			return -EINVAL;
		}
		/* A member cannot be in type void */
		if (!member->type ||
		    !BTF_TYPE_ID_VALID(member->type)) {
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
			return -EINVAL;
		}

		if (is_union && member->offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		/*
		 * ">" instead of ">=" because the last member could be
		 * "char a[0];"
		 */
		if (last_offset > member->offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
			return -EINVAL;
		}

		btf_verifier_log_member(env, t, member, NULL);
		last_offset = member->offset;
	}

	return meta_needed;
}

static int btf_struct_resolve(struct btf_verifier_env *env,
			      const struct resolve_vertex *v)
{
	const struct btf_member *member;
	int err;
	u16 i;

	/* Before continuing to resolve the next_member,
	 * ensure the last member is indeed resolved to a
	 * type with size info.
	 */
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
		if (WARN_ON_ONCE(!env_type_is_resolved(env,
						       last_member_type_id)))
			return -EINVAL;

		last_member_type = btf_type_by_id(env->btf,
						  last_member_type_id);
		err = btf_type_ops(last_member_type)->check_member(env, v->t,
							last_member,
							last_member_type);
		if (err)
			return err;
	}

	for_each_member_from(i, v->next_member, v->t, member) {
		u32 member_type_id = member->type;
		const struct btf_type *member_type = btf_type_by_id(env->btf,
								member_type_id);

		if (btf_type_is_void_or_null(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
		}

		err = btf_type_ops(member_type)->check_member(env, v->t,
							      member,
							      member_type);
		if (err)
			return err;
	}

	env_stack_pop_resolved(env, 0, 0);

	return 0;
}

static void btf_struct_log(struct btf_verifier_env *env,
			   const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
				u32 type_id, void *data, u8 bits_offset,
				struct seq_file *m)
{
	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
	const struct btf_member *member;
	u32 i;

	seq_puts(m, "{");
	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								member->type);
		u32 member_offset = member->offset;
		u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
		u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
		const struct btf_kind_operations *ops;

		if (i)
			seq_puts(m, seq);

		ops = btf_type_ops(member_type);
		ops->seq_show(btf, member_type, member->type,
			      data + bytes_offset, bits8_offset, m);
	}
	seq_puts(m, "}");
}

static struct btf_kind_operations struct_ops = {
	.check_meta = btf_struct_check_meta,
	.resolve = btf_struct_resolve,
	.check_member = btf_struct_check_member,
	.log_details = btf_struct_log,
	.seq_show = btf_struct_seq_show,
};

static int btf_enum_check_member(struct btf_verifier_env *env,
				 const struct btf_type *struct_type,
				 const struct btf_member *member,
				 const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < sizeof(int)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	const struct btf_enum *enums = btf_type_enum(t);
	struct btf *btf = env->btf;
	u16 i, nr_enums;
	u32 meta_needed;

	nr_enums = btf_type_vlen(t);
	meta_needed = nr_enums * sizeof(*enums);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->size != sizeof(int)) {
		btf_verifier_log_type(env, t, "Expected size:%zu",
				      sizeof(int));
		return -EINVAL;
	}

	/* enum type either has no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for (i = 0; i < nr_enums; i++) {
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
			return -EINVAL;
		}

		/* enum member must have a valid name */
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}

		btf_verifier_log(env, "\t%s val=%d\n",
				 btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
	}

	return meta_needed;
}

static void btf_enum_log(struct btf_verifier_env *env,
			 const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
			      u32 type_id, void *data, u8 bits_offset,
			      struct seq_file *m)
{
	const struct btf_enum *enums = btf_type_enum(t);
	u32 i, nr_enums = btf_type_vlen(t);
	int v = *(int *)data;

	for (i = 0; i < nr_enums; i++) {
		if (v == enums[i].val) {
			seq_printf(m, "%s",
				   btf_name_by_offset(btf, enums[i].name_off));
			return;
		}
	}

	seq_printf(m, "%d", v);
}

static struct btf_kind_operations enum_ops = {
	.check_meta = btf_enum_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_enum_check_member,
	.log_details = btf_enum_log,
	.seq_show = btf_enum_seq_show,
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
};

static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}

static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG)
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	else if (err == -EEXIST)
		btf_verifier_log_type(env, t, "Loop detected");

	return err;
}

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	if (btf_type_is_struct(t))
		return !btf->resolved_ids[type_id] &&
			!btf->resolved_sizes[type_id];

	if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t && !btf_type_is_modifier(t);
	}

	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
			(array->nelems * elem_size ==
			 btf->resolved_sizes[type_id]);
	}

	return false;
}

static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_needs_resolve(t) &&
		    !btf_resolve_valid(env, t, type_id)) {
			btf_verifier_log_type(env, t, "Invalid resolve state");
			return -EINVAL;
		}
	}

	return 0;
}

static int btf_parse_type_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr = &env->btf->hdr;
	int err;

	/* Type section must align to 4 bytes */
	if (hdr->type_off & (sizeof(u32) - 1)) {
		btf_verifier_log(env, "Unaligned type_off");
		return -EINVAL;
	}

	if (!hdr->type_len) {
		btf_verifier_log(env, "No type found");
		return -EINVAL;
	}

	err = btf_check_all_metas(env);
	if (err)
		return err;

	return btf_check_all_types(env);
}

static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}

static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}
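
/* For example (editorial illustration with made-up numbers), a BTF blob
 * with hdr_len=24, type_len=200 and str_len=100 is expected to look like:
 *
 *	type_off=0,  so the type section spans [hdr_len, hdr_len + 200)
 *	str_off=200, so the string section spans [hdr_len + 200, hdr_len + 300)
 *	btf_data_size == hdr_len + 300
 *
 * i.e. the two sections, sorted by offset, must cover everything after
 * the header with no gap and no overlap, which is what
 * btf_check_sec_info() verifies below.
 */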

static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}

static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}

static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}
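
/* btf_parse() below ties the pieces together.  The rough order of
 * verification is:
 *
 *	btf_parse_hdr()      - size, magic, version, flags and the
 *	                       section layout (btf_check_sec_info())
 *	btf_parse_str_sec()  - string section placement and '\0'
 *	                       termination
 *	btf_parse_type_sec() - pass #1 (btf_check_all_metas()) and
 *	                       pass #2 (btf_check_all_types())
 *
 * The verifier env and its log only live for the duration of the
 * parse; the returned struct btf keeps its own copy of the raw data.
 */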

static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
}

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
	.release = btf_release,
};

static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}
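
/* The functions below are the syscall-facing side of BTF objects.  As
 * a rough sketch of how they are expected to be reached (the actual
 * command dispatch lives in kernel/bpf/syscall.c):
 *
 *	BPF_BTF_LOAD           -> btf_new_fd()         parse, publish id,
 *	                                               return an anon fd
 *	BPF_BTF_GET_FD_BY_ID   -> btf_get_fd_by_id()   new fd from an id
 *	BPF_OBJ_GET_INFO_BY_FD -> btf_get_info_by_fd() copy id/raw data out
 *	close(fd)              -> btf_release() -> btf_put()
 */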

int btf_new_fd(const union bpf_attr *attr)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(u64_to_user_ptr(attr->btf),
			attr->btf_size, attr->btf_log_level,
			u64_to_user_ptr(attr->btf_log_buf),
			attr->btf_log_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is published to userspace.
	 * All BTF frees must go through call_rcu() from
	 * now on (i.e. free by calling btf_put()).
	 */

	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}

struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info = {};
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}

u32 btf_id(const struct btf *btf)
{
	return btf->id;
}
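
/* Minimal userspace sketch (not part of this file) of loading BTF
 * through the bpf(2) syscall.  The attr fields are the ones consumed
 * by btf_new_fd() above; ptr_to_u64() is a hypothetical helper that
 * casts a pointer to __u64:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.btf           = ptr_to_u64(raw_btf_data);
 *	attr.btf_size      = raw_btf_size;
 *	attr.btf_log_level = 1;
 *	attr.btf_log_buf   = ptr_to_u64(log_buf);
 *	attr.btf_log_size  = sizeof(log_buf);
 *
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 *
 * On success the returned fd keeps the parsed BTF alive until it is
 * closed (btf_release() -> btf_put()).
 */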