/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language which modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data.  For example,
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type.  Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id.  The type_id
 * is implicitly determined by the location of the btf_type object in
 * the BTF type section.  The first one has type_id 1.  The second
 * one has type_id 2...etc.  Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *".  This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'.  Some btf_type may not
 * have a name.
 */
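
/* As an illustration of the above (a sketch, not taken from a real BTF
 * blob): the C declaration "const int *" would typically be encoded as a
 * chain of three btf_type objects, shown here in the same debug-log form
 * as the example above:
 *
 * [1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 * [2] CONST (anon) type_id=1
 * [3] PTR (anon) type_id=2
 *
 * The string section would then start with '\0' followed by "int\0",
 * and btf_type[1].name_off would be the offset of "int" in it.
 */
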
/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_type objects there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset to the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referred-to type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type.  A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
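
/* For illustration, these helpers do plain bit/byte arithmetic.
 * E.g. for bits == 12:
 *
 *	BITS_PER_BYTE_MASKED(12)  == 4	(12 & 7)
 *	BITS_ROUNDDOWN_BYTES(12)  == 1	(12 >> 3)
 *	BITS_ROUNDUP_BYTES(12)    == 2	(1 + !!4)
 */
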
#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs (each with 16 members) and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member(i, struct_type, member)			\
	for (i = 0, member = btf_type_member(struct_type);	\
	     i < btf_type_vlen(struct_type);			\
	     i++, member++)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

static DEFINE_IDR(btf_idr);
static DEFINE_SPINLOCK(btf_idr_lock);

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types;
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * A type (t) that refers to another
	 * type through t->type AND its size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return true;
	}

	return false;
}

static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_is_func(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
}

static bool btf_type_is_func_proto(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

/* union is only a special case of struct:
 * all its offsetof(member) == 0
 */
static bool btf_type_is_struct(const struct btf_type *t)
{
	u8 kind = BTF_INFO_KIND(t->info);

	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_ptr(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
}

static bool btf_type_is_int(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  An array can be thought of as a
 * special case of struct where the same member-type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u16 btf_type_vlen(const struct btf_type *t)
{
	return BTF_INFO_VLEN(t->info);
}

static bool btf_type_kflag(const struct btf_type *t)
{
	return BTF_INFO_KFLAG(t->info);
}

static u32 btf_member_bit_offset(const struct btf_type *struct_type,
				 const struct btf_member *member)
{
	return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
					   : member->offset;
}

static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
				    const struct btf_member *member)
{
	return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
					   : 0;
}
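
/* As a sketch of the kind_flag encoding (see BTF_MEMBER_BIT_OFFSET() and
 * BTF_MEMBER_BITFIELD_SIZE() in uapi/linux/btf.h): when kind_flag is set,
 * member->offset packs the bitfield size into the top 8 bits and the bit
 * offset into the low 24 bits.  E.g. a 3-bit bitfield starting at bit 5
 * would be encoded as (3 << 24) | 5.
 */
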
static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_member *btf_type_member(const struct btf_type *t)
{
	return (const struct btf_member *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	return BTF_STR_OFFSET_VALID(offset) &&
	       offset < btf->hdr.str_len;
}

/* Only C-style identifiers are permitted.  This can be relaxed if
 * necessary.
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	/* offset must be valid */
	const char *src = &btf->strings[offset];
	const char *src_limit;

	if (!isalpha(*src) && *src != '_')
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!isalnum(*src) && *src != '_')
			return false;
		src++;
	}

	return !*src;
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	if (!offset)
		return "(anon)";
	else if (offset < btf->hdr.str_len)
		return &btf->strings[offset];
	else
		return "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}

/*
 * A regular int is not a bitfield and it must be either
 * u8/u16/u32/u64.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
		return false;
	}

	return true;
}
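
/* For illustration: an INT type with BTF_INT_OFFSET == 0 and
 * BTF_INT_BITS == 32 is regular (a plain u32/s32), whereas
 * BTF_INT_BITS == 12 is not, because 12 is not a multiple of 8
 * (BITS_PER_BYTE_MASKED(12) != 0).
 */
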
/*
 * Check that the given struct member is a regular int with the expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag is set, the int should be a regular int and
		 * the bit offset should be at a byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* The CHECK_META phase already did a btf dump.
	 *
	 * If the member is logged again, it must have hit an error while
	 * parsing this member.  It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	/* < 2 because +1 for btf_void which is always in btf->types[0].
	 * btf_void is not accounted in btf->nr_types because btf_void
	 * does not come from the BTF file.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}
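
/* To sketch the growth arithmetic above: starting from an empty btf
 * (types_size == 0, nr_types == 0), the first btf_add_type() expands to
 * new_size = min(BTF_MAX_TYPE, 0 + max(0 >> 2, 16)) = 16 and places
 * btf_void in types[0]; the new type then lands in types[1] with
 * nr_types == 1.  Later expansions grow by a quarter of the current
 * size, but by at least 16 entries.
 */
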
static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map.  Some of the map_delete_elem()
	 * implementations may have IRQs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	/* +1 for btf_void */
	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (env->visit_states[type_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf->resolved_ids[*type_id];
	return btf_type_by_id(btf, *type_id);
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf->resolved_sizes[size_type_id];
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
			return NULL;

		size = btf->resolved_sizes[size_type_id];
		size_type_id = btf->resolved_ids[size_type_id];
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}

/* Used for ptr, array and struct/union type members.
 * int, enum and modifier types have their specific callback functions.
 */
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{
	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	/* The bitfield size is 0, so member->offset represents the bit
	 * offset only.  It is safe to call the non-kflag check_member
	 * variants.
	 */
	return btf_type_ops(member_type)->check_member(env, struct_type,
						       member,
						       member_type);
}

static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
	return -EINVAL;
}

static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offsets,
			    struct seq_file *m)
{
	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}

static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	if (nr_copy_bits > BITS_PER_U64) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 64");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
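
/* A small worked example of the bounds check above (illustrative only):
 * for a member at struct_bits_off == 40 whose int has BTF_INT_OFFSET == 0
 * and BTF_INT_BITS == 32, bytes_offset == 5 and nr_copy_bits == 32, so a
 * struct_size of 12 passes (12 - 5 >= BITS_ROUNDUP_BYTES(32) == 4) while
 * a struct_size of 8 is rejected.
 */
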
static int btf_int_check_kflag_member(struct btf_verifier_env *env,
				      const struct btf_type *struct_type,
				      const struct btf_member *member,
				      const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
	u32 int_data = btf_type_int(member_type);
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;

	/* a regular int type is required for the kflag int member */
	if (!btf_type_int_is_regular(member_type)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member base type");
		return -EINVAL;
	}

	/* check sanity of bitfield size */
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_int_data_bits = BTF_INT_BITS(int_data);
	if (!nr_bits) {
		/* Not a bitfield member, so the member offset must be at a
		 * byte boundary.
		 */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Invalid member offset");
			return -EINVAL;
		}

		nr_bits = nr_int_data_bits;
	} else if (nr_bits > nr_int_data_bits) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
	if (nr_copy_bits > BITS_PER_U64) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 64");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	int_data = btf_type_int(t);
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	if (nr_bits > BITS_PER_U64) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U64);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	/*
	 * Only one of the encoding bits is allowed and it
	 * should be sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found
	 * to be insufficient.
	 */
	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{
	int int_data = btf_type_int(t);

	btf_verifier_log(env,
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
			 t->size, BTF_INT_OFFSET(int_data),
			 BTF_INT_BITS(int_data),
			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}

static void btf_bitfield_seq_show(void *data, u8 bits_offset,
				  u8 nr_bits, struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num;

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	print_num = 0;
	memcpy(&print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U64 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U64 - nr_bits;

	print_num <<= left_shift_bits;
	print_num >>= right_shift_bits;

	seq_printf(m, "0x%llx", print_num);
}
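
/* To illustrate the shifts above (little-endian case): for a 3-bit
 * bitfield at bits_offset 2, nr_copy_bits == 5 and one byte is copied
 * into print_num.  Shifting left by 64 - 5 == 59 moves bits 2..4 of that
 * byte to the top of the u64, and shifting right by 64 - 3 == 61 brings
 * the 3-bit value back down to bits 0..2 with everything else cleared.
 */
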
static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 64 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}

static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}

static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.check_kflag_member = btf_int_check_kflag_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};

static int btf_modifier_check_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_member(env, struct_type,
							 &resolved_member,
							 resolved_type);
}

static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
					   const struct btf_type *struct_type,
					   const struct btf_member *member,
					   const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
							       &resolved_member,
							       resolved_type);
}

static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 struct_size, struct_bits_off, bytes_offset;

	struct_size = struct_type->size;
	struct_bits_off = member->offset;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (struct_size - bytes_offset < sizeof(void *)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	/* A typedef type must have a valid name, and the other ref types,
	 * volatile, const and restrict, should have a null name.
	 */
	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
		if (!t->name_off ||
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	} else {
		if (t->name_off) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;
	u32 next_type_size = 0;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes so that it can
	 * save us a few type-following steps when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, next_type_size);

	return 0;
}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure that the last-resolved-ptr does not refer back to
	 * the current ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static void btf_modifier_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  u32 type_id, void *data,
				  u8 bits_offset, struct seq_file *m)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}

static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	/* It is a hashed value */
	seq_printf(m, "%p", *(void **)data);
}

static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "type_id=%u", t->type);
}

static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.check_kflag_member = btf_modifier_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};
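
/* As a sketch of how the modifier resolution is cached: for
 * "typedef const int foo_t", resolving the TYPEDEF follows
 * TYPEDEF -> CONST -> INT, so resolved_ids[] for both the TYPEDEF and
 * the CONST end up pointing at the INT and resolved_sizes[] records 4,
 * letting btf_type_id_size() skip the modifier chain afterwards.
 */
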
static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};

static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->type) {
		btf_verifier_log_type(env, t, "type != 0");
		return -EINVAL;
	}

	/* fwd type must have a valid name */
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
}

static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_fwd_type_log,
	.seq_show = btf_df_seq_show,
};

static int btf_array_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;
	u32 array_type_id, array_size;
	struct btf *btf = env->btf;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	array_type_id = member->type;
	btf_type_id_size(btf, &array_type_id, &array_size);
	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < array_size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	const struct btf_array *array = btf_type_array(t);
	u32 meta_needed = sizeof(*array);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* array type should not have a name */
	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (t->size) {
		btf_verifier_log_type(env, t, "size != 0");
		return -EINVAL;
	}

	/* The array elem type and index type cannot be void,
	 * so !array->type and !array->index_type are not allowed.
	 */
	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
		btf_verifier_log_type(env, t, "Invalid elem");
		return -EINVAL;
	}

	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
		btf_verifier_log_type(env, t, "Invalid index");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{
	const struct btf_array *array = btf_type_array(v->t);
	const struct btf_type *elem_type, *index_type;
	u32 elem_type_id, index_type_id;
	struct btf *btf = env->btf;
	u32 elem_size;

	/* Check array->index_type */
	index_type_id = array->index_type;
	index_type = btf_type_by_id(btf, index_type_id);
	if (btf_type_nosize_or_null(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Check array->type */
	elem_type_id = array->type;
	elem_type = btf_type_by_id(btf, elem_type_id);
	if (btf_type_nosize_or_null(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
		return -EINVAL;
	}

	if (array->nelems && elem_size > U32_MAX / array->nelems) {
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);

	return 0;
}

static void btf_array_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_array *array = btf_type_array(t);

	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
			 array->type, array->index_type, array->nelems);
}

static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
			       u32 type_id, void *data, u8 bits_offset,
			       struct seq_file *m)
{
	const struct btf_array *array = btf_type_array(t);
	const struct btf_kind_operations *elem_ops;
	const struct btf_type *elem_type;
	u32 i, elem_size, elem_type_id;

	elem_type_id = array->type;
	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	elem_ops = btf_type_ops(elem_type);
	seq_puts(m, "[");
	for (i = 0; i < array->nelems; i++) {
		if (i)
			seq_puts(m, ",");

		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
				   bits_offset, m);
		data += elem_size;
	}
	seq_puts(m, "]");
}
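
/* For illustration: for "int a[10]" the ARRAY's elem resolves to a 4-byte
 * INT, so env_stack_pop_resolved() records a resolved size of
 * 4 * 10 == 40 bytes, and btf_array_seq_show() walks the ten elements
 * between "[" and "]", advancing data by elem_size each iteration.
 */
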
static struct btf_kind_operations array_ops = {
	.check_meta = btf_array_check_meta,
	.resolve = btf_array_resolve,
	.check_member = btf_array_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_array_log,
	.seq_show = btf_array_seq_show,
};

static int btf_struct_check_member(struct btf_verifier_env *env,
				   const struct btf_type *struct_type,
				   const struct btf_member *member,
				   const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < member_type->size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_struct_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{
	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
	const struct btf_member *member;
	u32 meta_needed, last_offset;
	struct btf *btf = env->btf;
	u32 struct_size = t->size;
	u32 offset;
	u16 i;

	meta_needed = btf_type_vlen(t) * sizeof(*member);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* A struct type has either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	last_offset = 0;
	for_each_member(i, t, member) {
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
			return -EINVAL;
		}

		/* A struct member has either no name or a valid one */
		if (member->name_off &&
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
			return -EINVAL;
		}
		/* A member cannot be of type void */
		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
			return -EINVAL;
		}

		offset = btf_member_bit_offset(t, member);
		if (is_union && offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		/*
		 * ">" instead of ">=" because the last member could be
		 * "char a[0];"
		 */
		if (last_offset > offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
			return -EINVAL;
		}

		btf_verifier_log_member(env, t, member, NULL);
		last_offset = offset;
	}

	return meta_needed;
}
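
/* For a concrete (illustrative) case of the checks above:
 * "struct s { int a; short b; };" with size 8 has member a at
 * bits_offset 0 and member b at bits_offset 32; the offsets are
 * non-decreasing and BITS_ROUNDUP_BYTES(32) == 4 <= 8, so both members
 * pass.  Recording the offsets out of order would trip the
 * "Invalid member bits_offset" check.
 */
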
* ensure the last member is indeed resolved to a 1916 * type with size info. 1917 */ 1918 if (v->next_member) { 1919 const struct btf_type *last_member_type; 1920 const struct btf_member *last_member; 1921 u16 last_member_type_id; 1922 1923 last_member = btf_type_member(v->t) + v->next_member - 1; 1924 last_member_type_id = last_member->type; 1925 if (WARN_ON_ONCE(!env_type_is_resolved(env, 1926 last_member_type_id))) 1927 return -EINVAL; 1928 1929 last_member_type = btf_type_by_id(env->btf, 1930 last_member_type_id); 1931 if (btf_type_kflag(v->t)) 1932 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t, 1933 last_member, 1934 last_member_type); 1935 else 1936 err = btf_type_ops(last_member_type)->check_member(env, v->t, 1937 last_member, 1938 last_member_type); 1939 if (err) 1940 return err; 1941 } 1942 1943 for_each_member_from(i, v->next_member, v->t, member) { 1944 u32 member_type_id = member->type; 1945 const struct btf_type *member_type = btf_type_by_id(env->btf, 1946 member_type_id); 1947 1948 if (btf_type_nosize_or_null(member_type)) { 1949 btf_verifier_log_member(env, v->t, member, 1950 "Invalid member"); 1951 return -EINVAL; 1952 } 1953 1954 if (!env_type_is_resolve_sink(env, member_type) && 1955 !env_type_is_resolved(env, member_type_id)) { 1956 env_stack_set_next_member(env, i + 1); 1957 return env_stack_push(env, member_type, member_type_id); 1958 } 1959 1960 if (btf_type_kflag(v->t)) 1961 err = btf_type_ops(member_type)->check_kflag_member(env, v->t, 1962 member, 1963 member_type); 1964 else 1965 err = btf_type_ops(member_type)->check_member(env, v->t, 1966 member, 1967 member_type); 1968 if (err) 1969 return err; 1970 } 1971 1972 env_stack_pop_resolved(env, 0, 0); 1973 1974 return 0; 1975 } 1976 1977 static void btf_struct_log(struct btf_verifier_env *env, 1978 const struct btf_type *t) 1979 { 1980 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 1981 } 1982 1983 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, 1984 u32 type_id, void *data, u8 bits_offset, 1985 struct seq_file *m) 1986 { 1987 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? 
"|" : ","; 1988 const struct btf_member *member; 1989 u32 i; 1990 1991 seq_puts(m, "{"); 1992 for_each_member(i, t, member) { 1993 const struct btf_type *member_type = btf_type_by_id(btf, 1994 member->type); 1995 const struct btf_kind_operations *ops; 1996 u32 member_offset, bitfield_size; 1997 u32 bytes_offset; 1998 u8 bits8_offset; 1999 2000 if (i) 2001 seq_puts(m, seq); 2002 2003 member_offset = btf_member_bit_offset(t, member); 2004 bitfield_size = btf_member_bitfield_size(t, member); 2005 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 2006 bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 2007 if (bitfield_size) { 2008 btf_bitfield_seq_show(data + bytes_offset, bits8_offset, 2009 bitfield_size, m); 2010 } else { 2011 ops = btf_type_ops(member_type); 2012 ops->seq_show(btf, member_type, member->type, 2013 data + bytes_offset, bits8_offset, m); 2014 } 2015 } 2016 seq_puts(m, "}"); 2017 } 2018 2019 static struct btf_kind_operations struct_ops = { 2020 .check_meta = btf_struct_check_meta, 2021 .resolve = btf_struct_resolve, 2022 .check_member = btf_struct_check_member, 2023 .check_kflag_member = btf_generic_check_kflag_member, 2024 .log_details = btf_struct_log, 2025 .seq_show = btf_struct_seq_show, 2026 }; 2027 2028 static int btf_enum_check_member(struct btf_verifier_env *env, 2029 const struct btf_type *struct_type, 2030 const struct btf_member *member, 2031 const struct btf_type *member_type) 2032 { 2033 u32 struct_bits_off = member->offset; 2034 u32 struct_size, bytes_offset; 2035 2036 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2037 btf_verifier_log_member(env, struct_type, member, 2038 "Member is not byte aligned"); 2039 return -EINVAL; 2040 } 2041 2042 struct_size = struct_type->size; 2043 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2044 if (struct_size - bytes_offset < sizeof(int)) { 2045 btf_verifier_log_member(env, struct_type, member, 2046 "Member exceeds struct_size"); 2047 return -EINVAL; 2048 } 2049 2050 return 0; 2051 } 2052 2053 static int btf_enum_check_kflag_member(struct btf_verifier_env *env, 2054 const struct btf_type *struct_type, 2055 const struct btf_member *member, 2056 const struct btf_type *member_type) 2057 { 2058 u32 struct_bits_off, nr_bits, bytes_end, struct_size; 2059 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; 2060 2061 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 2062 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 2063 if (!nr_bits) { 2064 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2065 btf_verifier_log_member(env, struct_type, member, 2066 "Member is not byte aligned"); 2067 return -EINVAL; 2068 } 2069 2070 nr_bits = int_bitsize; 2071 } else if (nr_bits > int_bitsize) { 2072 btf_verifier_log_member(env, struct_type, member, 2073 "Invalid member bitfield_size"); 2074 return -EINVAL; 2075 } 2076 2077 struct_size = struct_type->size; 2078 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); 2079 if (struct_size < bytes_end) { 2080 btf_verifier_log_member(env, struct_type, member, 2081 "Member exceeds struct_size"); 2082 return -EINVAL; 2083 } 2084 2085 return 0; 2086 } 2087 2088 static s32 btf_enum_check_meta(struct btf_verifier_env *env, 2089 const struct btf_type *t, 2090 u32 meta_left) 2091 { 2092 const struct btf_enum *enums = btf_type_enum(t); 2093 struct btf *btf = env->btf; 2094 u16 i, nr_enums; 2095 u32 meta_needed; 2096 2097 nr_enums = btf_type_vlen(t); 2098 meta_needed = nr_enums * sizeof(*enums); 2099 2100 if (meta_left < meta_needed) { 2101 btf_verifier_log_basic(env, t, 2102 "meta_left:%u 
meta_needed:%u", 2103 meta_left, meta_needed); 2104 return -EINVAL; 2105 } 2106 2107 if (btf_type_kflag(t)) { 2108 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2109 return -EINVAL; 2110 } 2111 2112 if (t->size != sizeof(int)) { 2113 btf_verifier_log_type(env, t, "Expected size:%zu", 2114 sizeof(int)); 2115 return -EINVAL; 2116 } 2117 2118 /* enum type either no name or a valid one */ 2119 if (t->name_off && 2120 !btf_name_valid_identifier(env->btf, t->name_off)) { 2121 btf_verifier_log_type(env, t, "Invalid name"); 2122 return -EINVAL; 2123 } 2124 2125 btf_verifier_log_type(env, t, NULL); 2126 2127 for (i = 0; i < nr_enums; i++) { 2128 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 2129 btf_verifier_log(env, "\tInvalid name_offset:%u", 2130 enums[i].name_off); 2131 return -EINVAL; 2132 } 2133 2134 /* enum member must have a valid name */ 2135 if (!enums[i].name_off || 2136 !btf_name_valid_identifier(btf, enums[i].name_off)) { 2137 btf_verifier_log_type(env, t, "Invalid name"); 2138 return -EINVAL; 2139 } 2140 2141 2142 btf_verifier_log(env, "\t%s val=%d\n", 2143 __btf_name_by_offset(btf, enums[i].name_off), 2144 enums[i].val); 2145 } 2146 2147 return meta_needed; 2148 } 2149 2150 static void btf_enum_log(struct btf_verifier_env *env, 2151 const struct btf_type *t) 2152 { 2153 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 2154 } 2155 2156 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t, 2157 u32 type_id, void *data, u8 bits_offset, 2158 struct seq_file *m) 2159 { 2160 const struct btf_enum *enums = btf_type_enum(t); 2161 u32 i, nr_enums = btf_type_vlen(t); 2162 int v = *(int *)data; 2163 2164 for (i = 0; i < nr_enums; i++) { 2165 if (v == enums[i].val) { 2166 seq_printf(m, "%s", 2167 __btf_name_by_offset(btf, 2168 enums[i].name_off)); 2169 return; 2170 } 2171 } 2172 2173 seq_printf(m, "%d", v); 2174 } 2175 2176 static struct btf_kind_operations enum_ops = { 2177 .check_meta = btf_enum_check_meta, 2178 .resolve = btf_df_resolve, 2179 .check_member = btf_enum_check_member, 2180 .check_kflag_member = btf_enum_check_kflag_member, 2181 .log_details = btf_enum_log, 2182 .seq_show = btf_enum_seq_show, 2183 }; 2184 2185 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, 2186 const struct btf_type *t, 2187 u32 meta_left) 2188 { 2189 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); 2190 2191 if (meta_left < meta_needed) { 2192 btf_verifier_log_basic(env, t, 2193 "meta_left:%u meta_needed:%u", 2194 meta_left, meta_needed); 2195 return -EINVAL; 2196 } 2197 2198 if (t->name_off) { 2199 btf_verifier_log_type(env, t, "Invalid name"); 2200 return -EINVAL; 2201 } 2202 2203 if (btf_type_kflag(t)) { 2204 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2205 return -EINVAL; 2206 } 2207 2208 btf_verifier_log_type(env, t, NULL); 2209 2210 return meta_needed; 2211 } 2212 2213 static void btf_func_proto_log(struct btf_verifier_env *env, 2214 const struct btf_type *t) 2215 { 2216 const struct btf_param *args = (const struct btf_param *)(t + 1); 2217 u16 nr_args = btf_type_vlen(t), i; 2218 2219 btf_verifier_log(env, "return=%u args=(", t->type); 2220 if (!nr_args) { 2221 btf_verifier_log(env, "void"); 2222 goto done; 2223 } 2224 2225 if (nr_args == 1 && !args[0].type) { 2226 /* Only one vararg */ 2227 btf_verifier_log(env, "vararg"); 2228 goto done; 2229 } 2230 2231 btf_verifier_log(env, "%u %s", args[0].type, 2232 __btf_name_by_offset(env->btf, 2233 args[0].name_off)); 2234 for (i = 1; i < nr_args - 
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
				     const struct btf_type *t,
				     u32 meta_left)
{
	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{
	const struct btf_param *args = (const struct btf_param *)(t + 1);
	u16 nr_args = btf_type_vlen(t), i;

	btf_verifier_log(env, "return=%u args=(", t->type);
	if (!nr_args) {
		btf_verifier_log(env, "void");
		goto done;
	}

	if (nr_args == 1 && !args[0].type) {
		/* Only one vararg */
		btf_verifier_log(env, "vararg");
		goto done;
	}

	btf_verifier_log(env, "%u %s", args[0].type,
			 __btf_name_by_offset(env->btf,
					      args[0].name_off));
	for (i = 1; i < nr_args - 1; i++)
		btf_verifier_log(env, ", %u %s", args[i].type,
				 __btf_name_by_offset(env->btf,
						      args[i].name_off));

	if (nr_args > 1) {
		const struct btf_param *last_arg = &args[nr_args - 1];

		if (last_arg->type)
			btf_verifier_log(env, ", %u %s", last_arg->type,
					 __btf_name_by_offset(env->btf,
							      last_arg->name_off));
		else
			btf_verifier_log(env, ", vararg");
	}

done:
	btf_verifier_log(env, ")");
}

static struct btf_kind_operations func_proto_ops = {
	.check_meta = btf_func_proto_check_meta,
	.resolve = btf_df_resolve,
	/*
	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
	 * a struct's member.
	 *
	 * It should be a function pointer instead.
	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
	 *
	 * Hence, there is no btf_func_check_member().
	 */
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_func_proto_log,
	.seq_show = btf_df_seq_show,
};

static s32 btf_func_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static struct btf_kind_operations func_ops = {
	.check_meta = btf_func_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};

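/* As an illustration (hypothetical type_ids), a prototype such as
 *
 *	int add(int a, int b);
 *
 * is typically encoded as a FUNC_PROTO plus a FUNC referring to it, and
 * btf_func_proto_log()/btf_ref_type_log() would log roughly:
 *
 *	[3] FUNC_PROTO (anon) return=1 args=(1 a, 1 b)
 *	[4] FUNC add type_id=3
 *
 * where type_id 1 is assumed to be "int".  The checks below enforce the
 * remaining FUNC_PROTO/FUNC constraints that check_meta alone cannot see.
 */
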
static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{
	const struct btf_type *ret_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;
	int err;

	btf = env->btf;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);

	/* Check func return type which could be "void" (t->type == 0) */
	if (t->type) {
		u32 ret_type_id = t->type;

		ret_type = btf_type_by_id(btf, ret_type_id);
		if (!ret_type) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* Ensure the return type is a type that has a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}
	}

	if (!nr_args)
		return 0;

	/* Last func arg type_id could be 0 if it is a vararg */
	if (!args[nr_args - 1].type) {
		if (args[nr_args - 1].name_off) {
			btf_verifier_log_type(env, t, "Invalid arg#%u",
					      nr_args);
			return -EINVAL;
		}
		nr_args--;
	}

	err = 0;
	for (i = 0; i < nr_args; i++) {
		const struct btf_type *arg_type;
		u32 arg_type_id;

		arg_type_id = args[i].type;
		arg_type = btf_type_by_id(btf, arg_type_id);
		if (!arg_type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				break;
		}

		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}
	}

	return err;
}

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_type *proto_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;

	btf = env->btf;
	proto_type = btf_type_by_id(btf, t->type);

	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	args = (const struct btf_param *)(proto_type + 1);
	nr_args = btf_type_vlen(proto_type);
	for (i = 0; i < nr_args; i++) {
		if (!args[i].name_off && args[i].type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			return -EINVAL;
		}
	}

	return 0;
}

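/* For example (a sketch, not taken from a real load), a FUNC whose
 * prototype leaves a non-vararg argument unnamed, i.e. something like
 *
 *	int sum(int a, int);
 *
 * fails btf_func_check() above with "Invalid arg#2": every argument of a
 * FUNC must carry a name, while a trailing vararg (type_id 0, no name)
 * remains acceptable for the underlying FUNC_PROTO.
 */
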
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
};

static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}

static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	if (btf_type_is_struct(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t && !btf_type_is_modifier(t);
	}

	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
		       (array->nelems * elem_size ==
			btf->resolved_sizes[type_id]);
	}

	return false;
}

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}

static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_is_func_proto(t)) {
			err = btf_func_proto_check(env, t);
			if (err)
				return err;
		}

		if (btf_type_is_func(t)) {
			err = btf_func_check(env, t);
			if (err)
				return err;
		}
	}

	return 0;
}

static int btf_parse_type_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr = &env->btf->hdr;
	int err;

	/* Type section must align to 4 bytes */
	if (hdr->type_off & (sizeof(u32) - 1)) {
		btf_verifier_log(env, "Unaligned type_off");
		return -EINVAL;
	}

	if (!hdr->type_len) {
		btf_verifier_log(env, "No type found");
		return -EINVAL;
	}

	err = btf_check_all_metas(env);
	if (err)
		return err;

	return btf_check_all_types(env);
}

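/* A minimal sketch of the layout assumed by btf_parse_type_sec() above and
 * the section parsers below (section offsets are relative to the end of
 * the header, sizes are illustrative only):
 *
 *	struct btf_header hdr = {
 *		.magic    = BTF_MAGIC,
 *		.version  = BTF_VERSION,
 *		.hdr_len  = sizeof(struct btf_header),
 *		.type_off = 0,		.type_len = 100,
 *		.str_off  = 100,	.str_len  = 40,
 *	};
 *
 * i.e. the type section starts right after the header and the string
 * section follows it, ending exactly at the end of the BTF data (which
 * btf_parse_str_sec() and btf_check_sec_info() enforce).
 */
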
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}

static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}

static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}

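/* To illustrate the gap/overlap check above (numbers are made up): with
 * hdr_len = 24 and btf_data_size = 164, expected_total is 140.  A layout
 * of {type_off = 0, type_len = 100} and {str_off = 100, str_len = 40}
 * walks through cleanly, while {str_off = 104, ...} would leave a 4-byte
 * gap ("Unsupported section found") and {str_off = 96, ...} would overlap
 * the type section ("Section overlap found").
 */
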
static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}

static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}

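/* From userspace, btf_parse() is reached via the BPF_BTF_LOAD command.
 * A minimal sketch (error handling omitted, raw syscall used purely for
 * illustration):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.btf = (__u64)(unsigned long)btf_data;
 *	attr.btf_size = btf_data_size;
 *	attr.btf_log_level = 1;
 *	attr.btf_log_buf = (__u64)(unsigned long)log_buf;
 *	attr.btf_log_size = sizeof(log_buf);
 *
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 *
 * which ends up in btf_new_fd() below and, on success, returns an fd
 * holding a reference on the parsed struct btf.
 */
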
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
}

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
	.release = btf_release,
};

static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}

int btf_new_fd(const union bpf_attr *attr)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(u64_to_user_ptr(attr->btf),
			attr->btf_size, attr->btf_log_level,
			u64_to_user_ptr(attr->btf_log_buf),
			attr->btf_log_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is published to userspace.
	 * From now on, all BTF frees must go through call_rcu()
	 * (i.e. free by calling btf_put()).
	 */

	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}

struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info = {};
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}

u32 btf_id(const struct btf *btf)
{
	return btf->id;
}