// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <net/sock.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily
 * targets.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g., to describe an
 * array, 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1. The second
 * one has type_id 2...etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'. Some btf_type may not
 * have a name.
 */
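
/* Illustration (a sketch, not used by this file): the "const void *"
 * example above could be laid out in the type section as two 12-byte
 * 'struct btf_type' entries, neither of which needs extra data after
 * it:
 *
 *	type_id 1: { .name_off = 0,			// anonymous
 *		     .info = BTF_KIND_CONST << 24,
 *		     .type = 2 }			// refers to the PTR type
 *	type_id 2: { .name_off = 0,
 *		     .info = BTF_KIND_PTR << 24,
 *		     .type = 0 }			// refers to void
 *
 * The string section for such a minimal snippet is a single '\0' byte.
 */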

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referred-to type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs, each with 16 members, plus
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)
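
/* Worked example (illustration only) of the bit-manipulation macros
 * above: for bits = 12,
 *
 *	BITS_PER_BYTE_MASKED(12)  == 4	(4 bits into the second byte)
 *	BITS_ROUNDDOWN_BYTES(12)  == 1	(one whole byte)
 *	BITS_ROUNDUP_BYTES(12)    == 2	(two bytes are needed to hold 12 bits)
 *
 * and for an exact multiple of 8, bits = 16:
 *
 *	BITS_ROUNDUP_BYTES(16)    == 2.
 */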

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 32,
};

struct btf_kfunc_set_tab {
	struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown. */
#define BTF_SHOW_OBJ_SAFE_SIZE 32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes of space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
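
/* Minimal usage sketch (illustration only, not used by this file) of
 * the for_each_member_from() iteration macro defined earlier above:
 * walk all members of a STRUCT/UNION type starting from member 0 and
 * log each member's name.
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member_from(i, 0, struct_type, member)
 *		pr_debug("member %u: %s\n", i,
 *			 btf_name_by_offset(btf, member->name_off));
 *
 * The "from" argument exists so that resolve can restart iteration at
 * v->next_member after a member pushed a new type onto the stack.
 */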

/* Type name size */
#define BTF_SHOW_NAME_SIZE 80

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback. See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display. The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it. When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth. See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access. To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it. We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself). btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts. obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines whether a new
 * copy_from_kernel_nofault() is needed.
 */
struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;		/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * a type (t) that refers to another
	 * type through t->type AND whose size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}
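
/* Minimal usage sketch (illustration only): look up the type_id of
 * "task_struct" in vmlinux BTF via btf_find_by_name_kind() above.
 *
 *	struct btf *btf = bpf_get_btf_vmlinux();
 *	s32 type_id;
 *
 *	if (!IS_ERR_OR_NULL(btf)) {
 *		type_id = btf_find_by_name_kind(btf, "task_struct",
 *						BTF_KIND_STRUCT);
 *		if (type_id > 0)
 *			pr_debug("task_struct type_id=%d\n", type_id);
 *	}
 */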

static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * positive btf_id or negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}
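
/* Worked example (illustration only) for btf_type_skip_modifiers():
 * given the chain
 *
 *	[1] TYPEDEF 'u32_t'  type_id=2
 *	[2] CONST   (anon)   type_id=3
 *	[3] INT     'unsigned int'
 *
 * btf_type_skip_modifiers(btf, 1, &res_id) returns the INT type and
 * sets res_id to 3, since TYPEDEF and CONST are both "modifiers" in
 * the btf_type_is_modifier() sense above.
 */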

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of struct where the same member type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
	case BTF_KIND_FLOAT:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	if (!BTF_STR_OFFSET_VALID(offset))
		return false;

	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	return offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    ((c == '.' && !dot_ok) ||
	      c != '.'))
		return false;
	return true;
}

static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true, dot_ok))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false, dot_ok))
			return false;
		src++;
	}

	return !*src;
}

/* Only C-style identifiers are permitted. This can be relaxed if
 * necessary.
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, false);
}

static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, true);
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	const char *name;

	if (!offset)
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_str_by_offset(btf, offset);
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	type_id -= btf->start_id;
	if (type_id >= btf->nr_types)
		return NULL;
	return btf->types[type_id];
}

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}

/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t) &&
	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
		t = btf_type_by_id(btf, t->type);
	}

	return t;
}

#define BTF_SHOW_MAX_ITER 10

#define BTF_KIND_BIT(kind) (1ULL << kind)
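
/* Worked example (illustration only) of the kflag member-offset
 * encoding checked in btf_member_is_reg_int() above. When a
 * struct/union has the kind_flag set, each member's 32-bit "offset"
 * field packs two values:
 *
 *	bitfield_size = BTF_MEMBER_BITFIELD_SIZE(offset);  // offset >> 24
 *	bit_offset    = BTF_MEMBER_BIT_OFFSET(offset);     // offset & 0xffffff
 *
 * So for "int x:5" placed 32 bits into its struct, offset would be
 * (5 << 24) | 32. A plain (non-bitfield) member encodes
 * bitfield_size == 0 and its ordinary bit offset.
 */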

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
	const struct btf_member *m = show->state.member;
	const struct btf_type *t;
	const struct btf_array *array;
	u32 id = show->state.type_id;
	const char *member = NULL;
	bool show_member = false;
	u64 kinds = 0;
	int i;

	show->state.name[0] = '\0';

	/*
	 * Don't show type name if we're showing an array member;
	 * in that case we show the array type so we don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";

	/* Retrieve member name, if any. */
	if (m) {
		member = btf_name_by_offset(show->btf, m->name_off);
		show_member = strlen(member) > 0;
		id = m->type;
	}

	/*
	 * Start with type_id, as we have resolved the struct btf_type *
	 * via btf_modifier_show() past the parent typedef to the child
	 * struct, int etc it is defined as. In such cases, the type_id
	 * still represents the starting type while the struct btf_type *
	 * in our show->state points at the resolved type of the typedef.
	 */
	t = btf_type_by_id(show->btf, id);
	if (!t)
		return "";

	/*
	 * The goal here is to build up the right number of pointer and
	 * array suffixes while ensuring the type name for a typedef
	 * is represented. Along the way we accumulate a list of
	 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for struct, we will just display the pointer value.
	 *
	 * We also want to accumulate the right number of pointer or array
	 * indices in the format string while iterating until we get to
	 * the typedef/pointee/array member target type.
	 *
	 * We start by pointing at the end of pointer and array suffix
	 * strings; as we accumulate pointers and arrays we move the pointer
	 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER levels of nesting of
	 * pointers and/or arrays and typedefs are supported as a precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to so that we can add parentheses if it is a
	 * "typedef struct" etc.
	 */
	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {

		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_TYPEDEF:
			if (!name)
				name = btf_name_by_offset(show->btf,
							  t->name_off);
			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
			parens = "[";
			if (!t)
				return "";
			array = btf_type_array(t);
			if (array_suffix > array_suffixes)
				array_suffix -= 2;
			id = array->type;
			break;
		case BTF_KIND_PTR:
			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
			if (ptr_suffix > ptr_suffixes)
				ptr_suffix -= 1;
			id = t->type;
			break;
		default:
			id = 0;
			break;
		}
		if (!id)
			break;
		t = btf_type_skip_qualifiers(show->btf, id);
	}
	/* We may not be able to represent this type; bail to be safe */
	if (i == BTF_SHOW_MAX_ITER)
		return "";

	if (!name)
		name = btf_name_by_offset(show->btf, t->name_off);

	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
			 "struct" : "union";
		/* if it's an array of struct/union, parens is already set */
		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
			parens = "{";
		break;
	case BTF_KIND_ENUM:
		prefix = "enum";
		break;
	default:
		break;
	}

	/* pointer does not require parens */
	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";

	/* Even if we don't want type name info, we want parentheses etc */
	if (show->flags & BTF_SHOW_NONAME)
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);

	return show->state.name;
}

static const char *__btf_show_indent(struct btf_show *show)
{
	const char *indents = "                                ";
	const char *indent = &indents[strlen(indents)];

	if ((indent - show->state.depth) >= indents)
		return indent - show->state.depth;
	return indents;
}

static const char *btf_show_indent(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
}

static const char *btf_show_newline(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
}

static const char *btf_show_delim(struct btf_show *show)
{
	if (show->state.depth == 0)
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
	    BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
}
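
/* Illustration (a sketch; exact spacing may differ): for a value of
 * type "struct { int x; int y; }" with x = 1 and y = 2, the show
 * machinery above emits roughly
 *
 *	(struct){
 *	 .x = (int)1,
 *	 .y = (int)2,
 *	}
 *
 * and, with BTF_SHOW_COMPACT set, "(struct){.x = (int)1,.y = (int)2,}".
 * In compact mode, union members are delimited by '|' instead of ','
 * (see btf_show_delim() above).
 */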

__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{
	va_list args;

	if (!show->state.depth_check) {
		va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)				       \
	do {								       \
		if ((value) != 0 || (show->flags & BTF_SHOW_ZERO) ||	       \
		    show->state.depth == 0) {				       \
			btf_show(show, "%s%s" fmt "%s%s",		       \
				 btf_show_indent(show),			       \
				 btf_show_name(show),			       \
				 value, btf_show_delim(show),		       \
				 btf_show_newline(show));		       \
			if (show->state.depth > show->state.depth_to_show)     \
				show->state.depth_to_show = show->state.depth; \
		}							       \
	} while (0)

#define btf_show_type_values(show, fmt, ...)				       \
	do {								       \
		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
			 btf_show_name(show),				       \
			 __VA_ARGS__, btf_show_delim(show),		       \
			 btf_show_newline(show));			       \
		if (show->state.depth > show->state.depth_to_show)	       \
			show->state.depth_to_show = show->state.depth;	       \
	} while (0)

/* How much is left to copy to safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{
	return show->obj.head + show->obj.size - data;
}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{
	return data >= show->obj.data &&
	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{
	if (btf_show_obj_is_safe(show, data, size))
		return show->obj.safe + (data - show->obj.data);
	return NULL;
}

/*
 * Return a safe-to-access version of data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data (determined by the @data pointer and the size of the
 * largest base type we can encounter (represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{
	const struct btf_type *rt;
	int size_left, size;
	void *safe = NULL;

	if (show->flags & BTF_SHOW_UNSAFE)
		return data;

	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
		return NULL;
	}

	/*
	 * Is this toplevel object? If so, set total object size and
	 * initialize pointers. Otherwise check if we still fall within
	 * our safe object data.
	 */
	if (show->state.depth == 0) {
		show->obj.size = size;
		show->obj.head = data;
	} else {
		/*
		 * If the size of the current object is > our remaining
		 * safe buffer we _may_ need to do a new copy. However
		 * consider the case of a nested struct; its size pushes
		 * us over the safe buffer limit, but showing any individual
		 * struct members does not. In such cases, we don't need
		 * to initiate a fresh copy yet; however we definitely need
		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
		 * in our buffer, regardless of the current object size.
		 * The logic here is that as we resolve types we will
		 * hit a base type at some point, and we need to be sure
		 * the next chunk of data is safely available to display
		 * that type info safely. We cannot rely on the size of
		 * the current object here because it may be much larger
		 * than our current buffer (e.g. task_struct is 8k).
		 * All we want to do here is ensure that we can print the
		 * next basic type, which we can if either
		 * - the current type size is within the safe buffer; or
		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *   the safe buffer.
		 */
		safe = __btf_show_obj_safe(show, data,
					   min(size,
					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
	}

	/*
	 * We need a new copy to our safe object, either because we haven't
	 * yet copied and are initializing safe data, or because the data
	 * we want falls outside the boundaries of the safe object.
	 */
	if (!safe) {
		size_left = btf_show_obj_size_left(show, data);
		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
		if (!show->state.status) {
			show->obj.data = data;
			safe = show->obj.safe;
		}
	}

	return safe;
}
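
/* Usage sketch (illustration only): a show function for some kind is
 * expected to bracket its work with btf_show_start_type() and
 * btf_show_end_type() below, and must treat a NULL safe pointer as
 * "data could not be copied safely":
 *
 *	void *safe_data = btf_show_start_type(show, t, type_id, data);
 *
 *	if (!safe_data)
 *		return;
 *	... format the value found at safe_data ...
 *	btf_show_end_type(show);
 */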

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{
	show->state.type = t;
	show->state.type_id = type_id;
	show->state.name[0] = '\0';

	return btf_show_obj_safe(show, t, data);
}

static void btf_show_end_type(struct btf_show *show)
{
	show->state.type = NULL;
	show->state.type_id = 0;
	show->state.name[0] = '\0';
}

static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{
	void *safe_data = btf_show_start_type(show, t, type_id, data);

	if (!safe_data)
		return safe_data;

	btf_show(show, "%s%s%s", btf_show_indent(show),
		 btf_show_name(show),
		 btf_show_newline(show));
	show->state.depth++;
	return safe_data;
}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{
	show->state.depth--;
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
		 btf_show_delim(show), btf_show_newline(show));
	btf_show_end_type(show);
}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{
	show->state.member = m;
}

static void btf_show_start_array_member(struct btf_show *show)
{
	show->state.array_member = 1;
	btf_show_start_member(show, NULL);
}

static void btf_show_end_member(struct btf_show *show)
{
	show->state.member = NULL;
}

static void btf_show_end_array_member(struct btf_show *show)
{
	show->state.array_member = 0;
	btf_show_end_member(show);
}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{
	show->state.array_encoding = array_encoding;
	show->state.array_terminated = 0;
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_array_type(struct btf_show *show)
{
	show->state.array_encoding = 0;
	show->state.array_terminated = 0;
	btf_show_end_aggr_type(show, "]");
}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_struct_type(struct btf_show *show)
{
	btf_show_end_aggr_type(show, "}");
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size == btf->nr_types) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0) {
			if (!btf->base_btf) {
				/* lazily init VOID type */
				new_types[0] = &btf_void;
				btf->nr_types++;
			}
		} else {
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * btf->nr_types);
		}

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[btf->nr_types++] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
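
/* Worked example (illustration only) of the growth policy in
 * btf_add_type() above: the types array grows by a quarter of its
 * current capacity, with a floor of 16 entries. Starting from an
 * empty array, successive expansions give capacities of
 *
 *	0 -> 16 -> 32 -> 48 -> 64 -> 80 -> 100 -> 125 -> ...
 *
 * i.e. 16-entry steps until types_size >> 2 exceeds 16, then ~25%
 * growth, always capped at BTF_MAX_TYPE.
 */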

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some of the map_delete_elem()
	 * implementations may run with irqs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free_kfunc_set_tab(struct btf *btf)
{
	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
	int hook, type;

	if (!tab)
		return;
	/* For module BTF, we directly assign the sets being registered, so
	 * there is nothing to free except kfunc_set_tab.
	 */
	if (btf_is_module(btf))
		goto free_tab;
	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) {
		for (type = 0; type < ARRAY_SIZE(tab->sets[0]); type++)
			kfree(tab->sets[hook][type]);
	}
free_tab:
	kfree(tab);
	btf->kfunc_set_tab = NULL;
}

static void btf_free(struct btf *btf)
{
	btf_free_kfunc_set_tab(btf);
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_get(struct btf *btf)
{
	refcount_inc(&btf->refcnt);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	/* base BTF types should be resolved by now */
	if (type_id < env->btf->start_id)
		return true;

	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
}
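
/* Illustration (not used by the code): resolving the pointer loop from
 * the file-header comment,
 *
 *	[1] CONST -> [2] PTR -> [3] CONST -> [4] PTR -> [1] ...
 *
 * env_stack_push() below marks each type VISITED as it is pushed.
 * When the walk reaches [1] again, its state is no longer NOT_VISITED,
 * so env_stack_push() returns -EEXIST: the backedge (loop) has been
 * detected. Types are only marked RESOLVED when they are popped via
 * env_stack_pop_resolved().
 */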

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	const struct btf *btf = env->btf;
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (type_id < btf->start_id ||
	    env->visit_states[type_id - btf->start_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id - btf->start_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	type_id -= btf->start_id; /* adjust to local type id */
	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *elem_id: id of u32
 * *total_nelems: (x * y).  Hence, individual elem size is
 *                (*type_size / *total_nelems)
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *elem_id: 0
 * *total_nelems: 1
 * *type_id: id of type if it's changed within the function, 0 if not
 */
static const struct btf_type *
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		   u32 *type_size, const struct btf_type **elem_type,
		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array = NULL;
	u32 i, size, nelems = 1, id = 0;

	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		/* type->size can be used */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_FLOAT:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		/* Modifiers */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_TYPE_TAG:
			id = type->type;
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		/* type without size */
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	if (total_nelems)
		*total_nelems = nelems;
	if (elem_type)
		*elem_type = type;
	if (elem_id)
		*elem_id = array ? array->type : 0;
	if (type_id && id)
		*type_id = id;

	return array_type ? : type;
}
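
/* Worked example (illustration only): for "u32 arr[2][3]",
 * __btf_resolve_size() returns the outermost ARRAY type and sets
 *
 *	*type_size    = 2 * 3 * sizeof(u32) = 24
 *	*total_nelems = 6
 *	*elem_type    = the u32 INT type
 *
 * For "const struct X" it skips the modifier and returns the STRUCT
 * type with *type_size = sizeof(struct X) and *total_nelems = 1.
 */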

const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size)
{
	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
}

static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_ids[type_id - btf->start_id];
}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf_resolved_type_id(btf, *type_id);
	return btf_type_by_id(btf, *type_id);
}

static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_sizes[type_id - btf->start_id];
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf_resolved_type_size(btf, size_type_id);
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		size_type_id = btf_resolved_type_id(btf, size_type_id);
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf_resolved_type_size(btf, size_type_id);
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}
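
/* Usage sketch (illustration only) for btf_type_id_size() above: for a
 * chain "TYPEDEF u32_t -> INT unsigned int", passing the typedef's id
 * updates *type_id to the INT's id and sets *ret_size to 4:
 *
 *	u32 id = typedef_id, size;
 *
 *	if (btf_type_id_size(btf, &id, &size))
 *		pr_debug("resolved id=%u size=%u\n", id, size);
 */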
1979 */ 1980 return btf_type_ops(member_type)->check_member(env, struct_type, 1981 member, 1982 member_type); 1983 } 1984 1985 static int btf_df_resolve(struct btf_verifier_env *env, 1986 const struct resolve_vertex *v) 1987 { 1988 btf_verifier_log_basic(env, v->t, "Unsupported resolve"); 1989 return -EINVAL; 1990 } 1991 1992 static void btf_df_show(const struct btf *btf, const struct btf_type *t, 1993 u32 type_id, void *data, u8 bits_offsets, 1994 struct btf_show *show) 1995 { 1996 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info)); 1997 } 1998 1999 static int btf_int_check_member(struct btf_verifier_env *env, 2000 const struct btf_type *struct_type, 2001 const struct btf_member *member, 2002 const struct btf_type *member_type) 2003 { 2004 u32 int_data = btf_type_int(member_type); 2005 u32 struct_bits_off = member->offset; 2006 u32 struct_size = struct_type->size; 2007 u32 nr_copy_bits; 2008 u32 bytes_offset; 2009 2010 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { 2011 btf_verifier_log_member(env, struct_type, member, 2012 "bits_offset exceeds U32_MAX"); 2013 return -EINVAL; 2014 } 2015 2016 struct_bits_off += BTF_INT_OFFSET(int_data); 2017 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2018 nr_copy_bits = BTF_INT_BITS(int_data) + 2019 BITS_PER_BYTE_MASKED(struct_bits_off); 2020 2021 if (nr_copy_bits > BITS_PER_U128) { 2022 btf_verifier_log_member(env, struct_type, member, 2023 "nr_copy_bits exceeds 128"); 2024 return -EINVAL; 2025 } 2026 2027 if (struct_size < bytes_offset || 2028 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2029 btf_verifier_log_member(env, struct_type, member, 2030 "Member exceeds struct_size"); 2031 return -EINVAL; 2032 } 2033 2034 return 0; 2035 } 2036 2037 static int btf_int_check_kflag_member(struct btf_verifier_env *env, 2038 const struct btf_type *struct_type, 2039 const struct btf_member *member, 2040 const struct btf_type *member_type) 2041 { 2042 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; 2043 u32 int_data = btf_type_int(member_type); 2044 u32 struct_size = struct_type->size; 2045 u32 nr_copy_bits; 2046 2047 /* a regular int type is required for the kflag int member */ 2048 if (!btf_type_int_is_regular(member_type)) { 2049 btf_verifier_log_member(env, struct_type, member, 2050 "Invalid member base type"); 2051 return -EINVAL; 2052 } 2053 2054 /* check sanity of bitfield size */ 2055 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 2056 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 2057 nr_int_data_bits = BTF_INT_BITS(int_data); 2058 if (!nr_bits) { 2059 /* Not a bitfield member, member offset must be at byte 2060 * boundary. 
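 *
 * e.g. (illustrative) a kind_flag struct member "u32 x" at byte 4:
 *	bit_offset == 32, bitfield_size == 0
 * passes the byte-boundary check below, while bit_offset == 33
 * would not; nr_bits then falls back to BTF_INT_BITS() of the
 * underlying int (32 here).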
2061 */ 2062 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2063 btf_verifier_log_member(env, struct_type, member, 2064 "Invalid member offset"); 2065 return -EINVAL; 2066 } 2067 2068 nr_bits = nr_int_data_bits; 2069 } else if (nr_bits > nr_int_data_bits) { 2070 btf_verifier_log_member(env, struct_type, member, 2071 "Invalid member bitfield_size"); 2072 return -EINVAL; 2073 } 2074 2075 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2076 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); 2077 if (nr_copy_bits > BITS_PER_U128) { 2078 btf_verifier_log_member(env, struct_type, member, 2079 "nr_copy_bits exceeds 128"); 2080 return -EINVAL; 2081 } 2082 2083 if (struct_size < bytes_offset || 2084 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2085 btf_verifier_log_member(env, struct_type, member, 2086 "Member exceeds struct_size"); 2087 return -EINVAL; 2088 } 2089 2090 return 0; 2091 } 2092 2093 static s32 btf_int_check_meta(struct btf_verifier_env *env, 2094 const struct btf_type *t, 2095 u32 meta_left) 2096 { 2097 u32 int_data, nr_bits, meta_needed = sizeof(int_data); 2098 u16 encoding; 2099 2100 if (meta_left < meta_needed) { 2101 btf_verifier_log_basic(env, t, 2102 "meta_left:%u meta_needed:%u", 2103 meta_left, meta_needed); 2104 return -EINVAL; 2105 } 2106 2107 if (btf_type_vlen(t)) { 2108 btf_verifier_log_type(env, t, "vlen != 0"); 2109 return -EINVAL; 2110 } 2111 2112 if (btf_type_kflag(t)) { 2113 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2114 return -EINVAL; 2115 } 2116 2117 int_data = btf_type_int(t); 2118 if (int_data & ~BTF_INT_MASK) { 2119 btf_verifier_log_basic(env, t, "Invalid int_data:%x", 2120 int_data); 2121 return -EINVAL; 2122 } 2123 2124 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); 2125 2126 if (nr_bits > BITS_PER_U128) { 2127 btf_verifier_log_type(env, t, "nr_bits exceeds %zu", 2128 BITS_PER_U128); 2129 return -EINVAL; 2130 } 2131 2132 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { 2133 btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); 2134 return -EINVAL; 2135 } 2136 2137 /* 2138 * Only one of the encoding bits is allowed and it 2139 * should be sufficient for the pretty print purpose (i.e. decoding). 2140 * Multiple bits can be allowed later if it is found 2141 * to be insufficient. 2142 */ 2143 encoding = BTF_INT_ENCODING(int_data); 2144 if (encoding && 2145 encoding != BTF_INT_SIGNED && 2146 encoding != BTF_INT_CHAR && 2147 encoding != BTF_INT_BOOL) { 2148 btf_verifier_log_type(env, t, "Unsupported encoding"); 2149 return -ENOTSUPP; 2150 } 2151 2152 btf_verifier_log_type(env, t, NULL); 2153 2154 return meta_needed; 2155 } 2156 2157 static void btf_int_log(struct btf_verifier_env *env, 2158 const struct btf_type *t) 2159 { 2160 int int_data = btf_type_int(t); 2161 2162 btf_verifier_log(env, 2163 "size=%u bits_offset=%u nr_bits=%u encoding=%s", 2164 t->size, BTF_INT_OFFSET(int_data), 2165 BTF_INT_BITS(int_data), 2166 btf_int_encoding_str(BTF_INT_ENCODING(int_data))); 2167 } 2168 2169 static void btf_int128_print(struct btf_show *show, void *data) 2170 { 2171 /* data points to a __int128 number. 
2172 * Suppose 2173 * int128_num = *(__int128 *)data; 2174 * The formulas below show what upper_num and lower_num represent: 2175 * upper_num = int128_num >> 64; 2176 * lower_num = int128_num & 0xffffffffFFFFFFFFULL; 2177 */ 2178 u64 upper_num, lower_num; 2179 2180 #ifdef __BIG_ENDIAN_BITFIELD 2181 upper_num = *(u64 *)data; 2182 lower_num = *(u64 *)(data + 8); 2183 #else 2184 upper_num = *(u64 *)(data + 8); 2185 lower_num = *(u64 *)data; 2186 #endif 2187 if (upper_num == 0) 2188 btf_show_type_value(show, "0x%llx", lower_num); 2189 else 2190 btf_show_type_values(show, "0x%llx%016llx", upper_num, 2191 lower_num); 2192 } 2193 2194 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, 2195 u16 right_shift_bits) 2196 { 2197 u64 upper_num, lower_num; 2198 2199 #ifdef __BIG_ENDIAN_BITFIELD 2200 upper_num = print_num[0]; 2201 lower_num = print_num[1]; 2202 #else 2203 upper_num = print_num[1]; 2204 lower_num = print_num[0]; 2205 #endif 2206 2207 /* shake out unneeded bits by shift/or operations */ 2208 if (left_shift_bits >= 64) { 2209 upper_num = lower_num << (left_shift_bits - 64); 2210 lower_num = 0; 2211 } else { 2212 upper_num = (upper_num << left_shift_bits) | 2213 (lower_num >> (64 - left_shift_bits)); 2214 lower_num = lower_num << left_shift_bits; 2215 } 2216 2217 if (right_shift_bits >= 64) { 2218 lower_num = upper_num >> (right_shift_bits - 64); 2219 upper_num = 0; 2220 } else { 2221 lower_num = (lower_num >> right_shift_bits) | 2222 (upper_num << (64 - right_shift_bits)); 2223 upper_num = upper_num >> right_shift_bits; 2224 } 2225 2226 #ifdef __BIG_ENDIAN_BITFIELD 2227 print_num[0] = upper_num; 2228 print_num[1] = lower_num; 2229 #else 2230 print_num[0] = lower_num; 2231 print_num[1] = upper_num; 2232 #endif 2233 } 2234 2235 static void btf_bitfield_show(void *data, u8 bits_offset, 2236 u8 nr_bits, struct btf_show *show) 2237 { 2238 u16 left_shift_bits, right_shift_bits; 2239 u8 nr_copy_bytes; 2240 u8 nr_copy_bits; 2241 u64 print_num[2] = {}; 2242 2243 nr_copy_bits = nr_bits + bits_offset; 2244 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 2245 2246 memcpy(print_num, data, nr_copy_bytes); 2247 2248 #ifdef __BIG_ENDIAN_BITFIELD 2249 left_shift_bits = bits_offset; 2250 #else 2251 left_shift_bits = BITS_PER_U128 - nr_copy_bits; 2252 #endif 2253 right_shift_bits = BITS_PER_U128 - nr_bits; 2254 2255 btf_int128_shift(print_num, left_shift_bits, right_shift_bits); 2256 btf_int128_print(show, print_num); 2257 } 2258 2259 2260 static void btf_int_bits_show(const struct btf *btf, 2261 const struct btf_type *t, 2262 void *data, u8 bits_offset, 2263 struct btf_show *show) 2264 { 2265 u32 int_data = btf_type_int(t); 2266 u8 nr_bits = BTF_INT_BITS(int_data); 2267 u8 total_bits_offset; 2268 2269 /* 2270 * bits_offset is at most 7. 2271 * BTF_INT_OFFSET() cannot exceed 128 bits.
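 *
 * Worked example (values made up): bits_offset == 3 and
 * BTF_INT_OFFSET(int_data) == 13 give total_bits_offset == 16,
 * so data advances BITS_ROUNDDOWN_BYTES(16) == 2 bytes and the
 * residual in-byte offset is BITS_PER_BYTE_MASKED(16) == 0.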
2272 */ 2273 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 2274 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 2275 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 2276 btf_bitfield_show(data, bits_offset, nr_bits, show); 2277 } 2278 2279 static void btf_int_show(const struct btf *btf, const struct btf_type *t, 2280 u32 type_id, void *data, u8 bits_offset, 2281 struct btf_show *show) 2282 { 2283 u32 int_data = btf_type_int(t); 2284 u8 encoding = BTF_INT_ENCODING(int_data); 2285 bool sign = encoding & BTF_INT_SIGNED; 2286 u8 nr_bits = BTF_INT_BITS(int_data); 2287 void *safe_data; 2288 2289 safe_data = btf_show_start_type(show, t, type_id, data); 2290 if (!safe_data) 2291 return; 2292 2293 if (bits_offset || BTF_INT_OFFSET(int_data) || 2294 BITS_PER_BYTE_MASKED(nr_bits)) { 2295 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2296 goto out; 2297 } 2298 2299 switch (nr_bits) { 2300 case 128: 2301 btf_int128_print(show, safe_data); 2302 break; 2303 case 64: 2304 if (sign) 2305 btf_show_type_value(show, "%lld", *(s64 *)safe_data); 2306 else 2307 btf_show_type_value(show, "%llu", *(u64 *)safe_data); 2308 break; 2309 case 32: 2310 if (sign) 2311 btf_show_type_value(show, "%d", *(s32 *)safe_data); 2312 else 2313 btf_show_type_value(show, "%u", *(u32 *)safe_data); 2314 break; 2315 case 16: 2316 if (sign) 2317 btf_show_type_value(show, "%d", *(s16 *)safe_data); 2318 else 2319 btf_show_type_value(show, "%u", *(u16 *)safe_data); 2320 break; 2321 case 8: 2322 if (show->state.array_encoding == BTF_INT_CHAR) { 2323 /* check for null terminator */ 2324 if (show->state.array_terminated) 2325 break; 2326 if (*(char *)data == '\0') { 2327 show->state.array_terminated = 1; 2328 break; 2329 } 2330 if (isprint(*(char *)data)) { 2331 btf_show_type_value(show, "'%c'", 2332 *(char *)safe_data); 2333 break; 2334 } 2335 } 2336 if (sign) 2337 btf_show_type_value(show, "%d", *(s8 *)safe_data); 2338 else 2339 btf_show_type_value(show, "%u", *(u8 *)safe_data); 2340 break; 2341 default: 2342 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2343 break; 2344 } 2345 out: 2346 btf_show_end_type(show); 2347 } 2348 2349 static const struct btf_kind_operations int_ops = { 2350 .check_meta = btf_int_check_meta, 2351 .resolve = btf_df_resolve, 2352 .check_member = btf_int_check_member, 2353 .check_kflag_member = btf_int_check_kflag_member, 2354 .log_details = btf_int_log, 2355 .show = btf_int_show, 2356 }; 2357 2358 static int btf_modifier_check_member(struct btf_verifier_env *env, 2359 const struct btf_type *struct_type, 2360 const struct btf_member *member, 2361 const struct btf_type *member_type) 2362 { 2363 const struct btf_type *resolved_type; 2364 u32 resolved_type_id = member->type; 2365 struct btf_member resolved_member; 2366 struct btf *btf = env->btf; 2367 2368 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2369 if (!resolved_type) { 2370 btf_verifier_log_member(env, struct_type, member, 2371 "Invalid member"); 2372 return -EINVAL; 2373 } 2374 2375 resolved_member = *member; 2376 resolved_member.type = resolved_type_id; 2377 2378 return btf_type_ops(resolved_type)->check_member(env, struct_type, 2379 &resolved_member, 2380 resolved_type); 2381 } 2382 2383 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, 2384 const struct btf_type *struct_type, 2385 const struct btf_member *member, 2386 const struct btf_type *member_type) 2387 { 2388 const struct btf_type *resolved_type; 2389 u32 resolved_type_id = member->type; 2390 struct btf_member 
resolved_member; 2391 struct btf *btf = env->btf; 2392 2393 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2394 if (!resolved_type) { 2395 btf_verifier_log_member(env, struct_type, member, 2396 "Invalid member"); 2397 return -EINVAL; 2398 } 2399 2400 resolved_member = *member; 2401 resolved_member.type = resolved_type_id; 2402 2403 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type, 2404 &resolved_member, 2405 resolved_type); 2406 } 2407 2408 static int btf_ptr_check_member(struct btf_verifier_env *env, 2409 const struct btf_type *struct_type, 2410 const struct btf_member *member, 2411 const struct btf_type *member_type) 2412 { 2413 u32 struct_size, struct_bits_off, bytes_offset; 2414 2415 struct_size = struct_type->size; 2416 struct_bits_off = member->offset; 2417 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2418 2419 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2420 btf_verifier_log_member(env, struct_type, member, 2421 "Member is not byte aligned"); 2422 return -EINVAL; 2423 } 2424 2425 if (struct_size - bytes_offset < sizeof(void *)) { 2426 btf_verifier_log_member(env, struct_type, member, 2427 "Member exceeds struct_size"); 2428 return -EINVAL; 2429 } 2430 2431 return 0; 2432 } 2433 2434 static int btf_ref_type_check_meta(struct btf_verifier_env *env, 2435 const struct btf_type *t, 2436 u32 meta_left) 2437 { 2438 const char *value; 2439 2440 if (btf_type_vlen(t)) { 2441 btf_verifier_log_type(env, t, "vlen != 0"); 2442 return -EINVAL; 2443 } 2444 2445 if (btf_type_kflag(t)) { 2446 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2447 return -EINVAL; 2448 } 2449 2450 if (!BTF_TYPE_ID_VALID(t->type)) { 2451 btf_verifier_log_type(env, t, "Invalid type_id"); 2452 return -EINVAL; 2453 } 2454 2455 /* typedef/type_tag type must have a valid name, and other ref types, 2456 * volatile, const, restrict, should have a null name. 2457 */ 2458 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { 2459 if (!t->name_off || 2460 !btf_name_valid_identifier(env->btf, t->name_off)) { 2461 btf_verifier_log_type(env, t, "Invalid name"); 2462 return -EINVAL; 2463 } 2464 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { 2465 value = btf_name_by_offset(env->btf, t->name_off); 2466 if (!value || !value[0]) { 2467 btf_verifier_log_type(env, t, "Invalid name"); 2468 return -EINVAL; 2469 } 2470 } else { 2471 if (t->name_off) { 2472 btf_verifier_log_type(env, t, "Invalid name"); 2473 return -EINVAL; 2474 } 2475 } 2476 2477 btf_verifier_log_type(env, t, NULL); 2478 2479 return 0; 2480 } 2481 2482 static int btf_modifier_resolve(struct btf_verifier_env *env, 2483 const struct resolve_vertex *v) 2484 { 2485 const struct btf_type *t = v->t; 2486 const struct btf_type *next_type; 2487 u32 next_type_id = t->type; 2488 struct btf *btf = env->btf; 2489 2490 next_type = btf_type_by_id(btf, next_type_id); 2491 if (!next_type || btf_type_is_resolve_source_only(next_type)) { 2492 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2493 return -EINVAL; 2494 } 2495 2496 if (!env_type_is_resolve_sink(env, next_type) && 2497 !env_type_is_resolved(env, next_type_id)) 2498 return env_stack_push(env, next_type, next_type_id); 2499 2500 /* Figure out the resolved next_type_id with size. 2501 * They will be stored in the current modifier's 2502 * resolved_ids and resolved_sizes such that it can 2503 * save us a few type-following when we use it later (e.g. in 2504 * pretty print). 
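 *
 * Illustrative sketch (type ids made up):
 *	[1] INT 'int' size=4
 *	[2] CONST (anon) type_id=1
 *	[3] TYPEDEF 'cint' type_id=2
 * Resolving [3] records the size-bearing [1] as its resolved id,
 * so later users jump straight to the INT instead of re-walking
 * the CONST.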
2505 */ 2506 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 2507 if (env_type_is_resolved(env, next_type_id)) 2508 next_type = btf_type_id_resolve(btf, &next_type_id); 2509 2510 /* "typedef void new_void", "const void"...etc */ 2511 if (!btf_type_is_void(next_type) && 2512 !btf_type_is_fwd(next_type) && 2513 !btf_type_is_func_proto(next_type)) { 2514 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2515 return -EINVAL; 2516 } 2517 } 2518 2519 env_stack_pop_resolved(env, next_type_id, 0); 2520 2521 return 0; 2522 } 2523 2524 static int btf_var_resolve(struct btf_verifier_env *env, 2525 const struct resolve_vertex *v) 2526 { 2527 const struct btf_type *next_type; 2528 const struct btf_type *t = v->t; 2529 u32 next_type_id = t->type; 2530 struct btf *btf = env->btf; 2531 2532 next_type = btf_type_by_id(btf, next_type_id); 2533 if (!next_type || btf_type_is_resolve_source_only(next_type)) { 2534 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2535 return -EINVAL; 2536 } 2537 2538 if (!env_type_is_resolve_sink(env, next_type) && 2539 !env_type_is_resolved(env, next_type_id)) 2540 return env_stack_push(env, next_type, next_type_id); 2541 2542 if (btf_type_is_modifier(next_type)) { 2543 const struct btf_type *resolved_type; 2544 u32 resolved_type_id; 2545 2546 resolved_type_id = next_type_id; 2547 resolved_type = btf_type_id_resolve(btf, &resolved_type_id); 2548 2549 if (btf_type_is_ptr(resolved_type) && 2550 !env_type_is_resolve_sink(env, resolved_type) && 2551 !env_type_is_resolved(env, resolved_type_id)) 2552 return env_stack_push(env, resolved_type, 2553 resolved_type_id); 2554 } 2555 2556 /* We must resolve to something concrete at this point; no 2557 * forward types or similar that would resolve to a size of 2558 * zero are allowed. 2559 */ 2560 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 2561 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2562 return -EINVAL; 2563 } 2564 2565 env_stack_pop_resolved(env, next_type_id, 0); 2566 2567 return 0; 2568 } 2569 2570 static int btf_ptr_resolve(struct btf_verifier_env *env, 2571 const struct resolve_vertex *v) 2572 { 2573 const struct btf_type *next_type; 2574 const struct btf_type *t = v->t; 2575 u32 next_type_id = t->type; 2576 struct btf *btf = env->btf; 2577 2578 next_type = btf_type_by_id(btf, next_type_id); 2579 if (!next_type || btf_type_is_resolve_source_only(next_type)) { 2580 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2581 return -EINVAL; 2582 } 2583 2584 if (!env_type_is_resolve_sink(env, next_type) && 2585 !env_type_is_resolved(env, next_type_id)) 2586 return env_stack_push(env, next_type, next_type_id); 2587 2588 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY, 2589 * the modifier may have stopped resolving when it was resolved 2590 * to a ptr (last-resolved-ptr). 2591 * 2592 * We now need to continue from the last-resolved-ptr to 2593 * ensure the last-resolved-ptr is not referring back to 2594 * the current ptr (t).
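 *
 * e.g. (illustrative) for "int *const *p" the chain is
 *	PTR(t) -> CONST -> PTR -> INT
 * and the CONST may already be RESOLVED with the inner PTR recorded
 * as its last-resolved-ptr, which is why the code below pushes that
 * PTR when it has not been checked yet.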
2595 */ 2596 if (btf_type_is_modifier(next_type)) { 2597 const struct btf_type *resolved_type; 2598 u32 resolved_type_id; 2599 2600 resolved_type_id = next_type_id; 2601 resolved_type = btf_type_id_resolve(btf, &resolved_type_id); 2602 2603 if (btf_type_is_ptr(resolved_type) && 2604 !env_type_is_resolve_sink(env, resolved_type) && 2605 !env_type_is_resolved(env, resolved_type_id)) 2606 return env_stack_push(env, resolved_type, 2607 resolved_type_id); 2608 } 2609 2610 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 2611 if (env_type_is_resolved(env, next_type_id)) 2612 next_type = btf_type_id_resolve(btf, &next_type_id); 2613 2614 if (!btf_type_is_void(next_type) && 2615 !btf_type_is_fwd(next_type) && 2616 !btf_type_is_func_proto(next_type)) { 2617 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2618 return -EINVAL; 2619 } 2620 } 2621 2622 env_stack_pop_resolved(env, next_type_id, 0); 2623 2624 return 0; 2625 } 2626 2627 static void btf_modifier_show(const struct btf *btf, 2628 const struct btf_type *t, 2629 u32 type_id, void *data, 2630 u8 bits_offset, struct btf_show *show) 2631 { 2632 if (btf->resolved_ids) 2633 t = btf_type_id_resolve(btf, &type_id); 2634 else 2635 t = btf_type_skip_modifiers(btf, type_id, NULL); 2636 2637 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2638 } 2639 2640 static void btf_var_show(const struct btf *btf, const struct btf_type *t, 2641 u32 type_id, void *data, u8 bits_offset, 2642 struct btf_show *show) 2643 { 2644 t = btf_type_id_resolve(btf, &type_id); 2645 2646 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2647 } 2648 2649 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, 2650 u32 type_id, void *data, u8 bits_offset, 2651 struct btf_show *show) 2652 { 2653 void *safe_data; 2654 2655 safe_data = btf_show_start_type(show, t, type_id, data); 2656 if (!safe_data) 2657 return; 2658 2659 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ 2660 if (show->flags & BTF_SHOW_PTR_RAW) 2661 btf_show_type_value(show, "0x%px", *(void **)safe_data); 2662 else 2663 btf_show_type_value(show, "0x%p", *(void **)safe_data); 2664 btf_show_end_type(show); 2665 } 2666 2667 static void btf_ref_type_log(struct btf_verifier_env *env, 2668 const struct btf_type *t) 2669 { 2670 btf_verifier_log(env, "type_id=%u", t->type); 2671 } 2672 2673 static struct btf_kind_operations modifier_ops = { 2674 .check_meta = btf_ref_type_check_meta, 2675 .resolve = btf_modifier_resolve, 2676 .check_member = btf_modifier_check_member, 2677 .check_kflag_member = btf_modifier_check_kflag_member, 2678 .log_details = btf_ref_type_log, 2679 .show = btf_modifier_show, 2680 }; 2681 2682 static struct btf_kind_operations ptr_ops = { 2683 .check_meta = btf_ref_type_check_meta, 2684 .resolve = btf_ptr_resolve, 2685 .check_member = btf_ptr_check_member, 2686 .check_kflag_member = btf_generic_check_kflag_member, 2687 .log_details = btf_ref_type_log, 2688 .show = btf_ptr_show, 2689 }; 2690 2691 static s32 btf_fwd_check_meta(struct btf_verifier_env *env, 2692 const struct btf_type *t, 2693 u32 meta_left) 2694 { 2695 if (btf_type_vlen(t)) { 2696 btf_verifier_log_type(env, t, "vlen != 0"); 2697 return -EINVAL; 2698 } 2699 2700 if (t->type) { 2701 btf_verifier_log_type(env, t, "type != 0"); 2702 return -EINVAL; 2703 } 2704 2705 /* fwd type must have a valid name */ 2706 if (!t->name_off || 2707 !btf_name_valid_identifier(env->btf, t->name_off)) { 2708 btf_verifier_log_type(env, t, "Invalid name"); 2709 return -EINVAL; 2710 } 2711 2712 
btf_verifier_log_type(env, t, NULL); 2713 2714 return 0; 2715 } 2716 2717 static void btf_fwd_type_log(struct btf_verifier_env *env, 2718 const struct btf_type *t) 2719 { 2720 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); 2721 } 2722 2723 static struct btf_kind_operations fwd_ops = { 2724 .check_meta = btf_fwd_check_meta, 2725 .resolve = btf_df_resolve, 2726 .check_member = btf_df_check_member, 2727 .check_kflag_member = btf_df_check_kflag_member, 2728 .log_details = btf_fwd_type_log, 2729 .show = btf_df_show, 2730 }; 2731 2732 static int btf_array_check_member(struct btf_verifier_env *env, 2733 const struct btf_type *struct_type, 2734 const struct btf_member *member, 2735 const struct btf_type *member_type) 2736 { 2737 u32 struct_bits_off = member->offset; 2738 u32 struct_size, bytes_offset; 2739 u32 array_type_id, array_size; 2740 struct btf *btf = env->btf; 2741 2742 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2743 btf_verifier_log_member(env, struct_type, member, 2744 "Member is not byte aligned"); 2745 return -EINVAL; 2746 } 2747 2748 array_type_id = member->type; 2749 btf_type_id_size(btf, &array_type_id, &array_size); 2750 struct_size = struct_type->size; 2751 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2752 if (struct_size - bytes_offset < array_size) { 2753 btf_verifier_log_member(env, struct_type, member, 2754 "Member exceeds struct_size"); 2755 return -EINVAL; 2756 } 2757 2758 return 0; 2759 } 2760 2761 static s32 btf_array_check_meta(struct btf_verifier_env *env, 2762 const struct btf_type *t, 2763 u32 meta_left) 2764 { 2765 const struct btf_array *array = btf_type_array(t); 2766 u32 meta_needed = sizeof(*array); 2767 2768 if (meta_left < meta_needed) { 2769 btf_verifier_log_basic(env, t, 2770 "meta_left:%u meta_needed:%u", 2771 meta_left, meta_needed); 2772 return -EINVAL; 2773 } 2774 2775 /* array type should not have a name */ 2776 if (t->name_off) { 2777 btf_verifier_log_type(env, t, "Invalid name"); 2778 return -EINVAL; 2779 } 2780 2781 if (btf_type_vlen(t)) { 2782 btf_verifier_log_type(env, t, "vlen != 0"); 2783 return -EINVAL; 2784 } 2785 2786 if (btf_type_kflag(t)) { 2787 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2788 return -EINVAL; 2789 } 2790 2791 if (t->size) { 2792 btf_verifier_log_type(env, t, "size != 0"); 2793 return -EINVAL; 2794 } 2795 2796 /* Array elem type and index type cannot be in type void, 2797 * so !array->type and !array->index_type are not allowed. 
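 *
 * e.g. (illustrative, ids made up) "int a[5]" is encoded as
 *	[1] INT 'int' size=4
 *	[2] ARRAY (anon) type_id=1 index_type_id=1 nr_elems=5
 * where both type_id and index_type_id must be non-zero.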
2798 */ 2799 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { 2800 btf_verifier_log_type(env, t, "Invalid elem"); 2801 return -EINVAL; 2802 } 2803 2804 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { 2805 btf_verifier_log_type(env, t, "Invalid index"); 2806 return -EINVAL; 2807 } 2808 2809 btf_verifier_log_type(env, t, NULL); 2810 2811 return meta_needed; 2812 } 2813 2814 static int btf_array_resolve(struct btf_verifier_env *env, 2815 const struct resolve_vertex *v) 2816 { 2817 const struct btf_array *array = btf_type_array(v->t); 2818 const struct btf_type *elem_type, *index_type; 2819 u32 elem_type_id, index_type_id; 2820 struct btf *btf = env->btf; 2821 u32 elem_size; 2822 2823 /* Check array->index_type */ 2824 index_type_id = array->index_type; 2825 index_type = btf_type_by_id(btf, index_type_id); 2826 if (btf_type_nosize_or_null(index_type) || 2827 btf_type_is_resolve_source_only(index_type)) { 2828 btf_verifier_log_type(env, v->t, "Invalid index"); 2829 return -EINVAL; 2830 } 2831 2832 if (!env_type_is_resolve_sink(env, index_type) && 2833 !env_type_is_resolved(env, index_type_id)) 2834 return env_stack_push(env, index_type, index_type_id); 2835 2836 index_type = btf_type_id_size(btf, &index_type_id, NULL); 2837 if (!index_type || !btf_type_is_int(index_type) || 2838 !btf_type_int_is_regular(index_type)) { 2839 btf_verifier_log_type(env, v->t, "Invalid index"); 2840 return -EINVAL; 2841 } 2842 2843 /* Check array->type */ 2844 elem_type_id = array->type; 2845 elem_type = btf_type_by_id(btf, elem_type_id); 2846 if (btf_type_nosize_or_null(elem_type) || 2847 btf_type_is_resolve_source_only(elem_type)) { 2848 btf_verifier_log_type(env, v->t, 2849 "Invalid elem"); 2850 return -EINVAL; 2851 } 2852 2853 if (!env_type_is_resolve_sink(env, elem_type) && 2854 !env_type_is_resolved(env, elem_type_id)) 2855 return env_stack_push(env, elem_type, elem_type_id); 2856 2857 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 2858 if (!elem_type) { 2859 btf_verifier_log_type(env, v->t, "Invalid elem"); 2860 return -EINVAL; 2861 } 2862 2863 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { 2864 btf_verifier_log_type(env, v->t, "Invalid array of int"); 2865 return -EINVAL; 2866 } 2867 2868 if (array->nelems && elem_size > U32_MAX / array->nelems) { 2869 btf_verifier_log_type(env, v->t, 2870 "Array size overflows U32_MAX"); 2871 return -EINVAL; 2872 } 2873 2874 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); 2875 2876 return 0; 2877 } 2878 2879 static void btf_array_log(struct btf_verifier_env *env, 2880 const struct btf_type *t) 2881 { 2882 const struct btf_array *array = btf_type_array(t); 2883 2884 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", 2885 array->type, array->index_type, array->nelems); 2886 } 2887 2888 static void __btf_array_show(const struct btf *btf, const struct btf_type *t, 2889 u32 type_id, void *data, u8 bits_offset, 2890 struct btf_show *show) 2891 { 2892 const struct btf_array *array = btf_type_array(t); 2893 const struct btf_kind_operations *elem_ops; 2894 const struct btf_type *elem_type; 2895 u32 i, elem_size = 0, elem_type_id; 2896 u16 encoding = 0; 2897 2898 elem_type_id = array->type; 2899 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL); 2900 if (elem_type && btf_type_has_size(elem_type)) 2901 elem_size = elem_type->size; 2902 2903 if (elem_type && btf_type_is_int(elem_type)) { 2904 u32 int_type = btf_type_int(elem_type); 2905 2906 encoding = 
BTF_INT_ENCODING(int_type); 2907 2908 /* 2909 * BTF_INT_CHAR encoding never seems to be set for 2910 * char arrays, so if size is 1 and element is 2911 * printable as a char, we'll do that. 2912 */ 2913 if (elem_size == 1) 2914 encoding = BTF_INT_CHAR; 2915 } 2916 2917 if (!btf_show_start_array_type(show, t, type_id, encoding, data)) 2918 return; 2919 2920 if (!elem_type) 2921 goto out; 2922 elem_ops = btf_type_ops(elem_type); 2923 2924 for (i = 0; i < array->nelems; i++) { 2925 2926 btf_show_start_array_member(show); 2927 2928 elem_ops->show(btf, elem_type, elem_type_id, data, 2929 bits_offset, show); 2930 data += elem_size; 2931 2932 btf_show_end_array_member(show); 2933 2934 if (show->state.array_terminated) 2935 break; 2936 } 2937 out: 2938 btf_show_end_array_type(show); 2939 } 2940 2941 static void btf_array_show(const struct btf *btf, const struct btf_type *t, 2942 u32 type_id, void *data, u8 bits_offset, 2943 struct btf_show *show) 2944 { 2945 const struct btf_member *m = show->state.member; 2946 2947 /* 2948 * First check if any members would be shown (are non-zero). 2949 * See comments above "struct btf_show" definition for more 2950 * details on how this works at a high-level. 2951 */ 2952 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 2953 if (!show->state.depth_check) { 2954 show->state.depth_check = show->state.depth + 1; 2955 show->state.depth_to_show = 0; 2956 } 2957 __btf_array_show(btf, t, type_id, data, bits_offset, show); 2958 show->state.member = m; 2959 2960 if (show->state.depth_check != show->state.depth + 1) 2961 return; 2962 show->state.depth_check = 0; 2963 2964 if (show->state.depth_to_show <= show->state.depth) 2965 return; 2966 /* 2967 * Reaching here indicates we have recursed and found 2968 * non-zero array member(s). 
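 *
 * Sketch of the two-pass scheme: the __btf_array_show() call above
 * ran with depth_check set, so it only probed for non-zero elements
 * without emitting output; the call below prints for real. e.g. an
 * all-zero "int a[4]" member returns early above and is never
 * printed unless BTF_SHOW_ZERO is set.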
2969 */ 2970 } 2971 __btf_array_show(btf, t, type_id, data, bits_offset, show); 2972 } 2973 2974 static struct btf_kind_operations array_ops = { 2975 .check_meta = btf_array_check_meta, 2976 .resolve = btf_array_resolve, 2977 .check_member = btf_array_check_member, 2978 .check_kflag_member = btf_generic_check_kflag_member, 2979 .log_details = btf_array_log, 2980 .show = btf_array_show, 2981 }; 2982 2983 static int btf_struct_check_member(struct btf_verifier_env *env, 2984 const struct btf_type *struct_type, 2985 const struct btf_member *member, 2986 const struct btf_type *member_type) 2987 { 2988 u32 struct_bits_off = member->offset; 2989 u32 struct_size, bytes_offset; 2990 2991 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2992 btf_verifier_log_member(env, struct_type, member, 2993 "Member is not byte aligned"); 2994 return -EINVAL; 2995 } 2996 2997 struct_size = struct_type->size; 2998 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2999 if (struct_size - bytes_offset < member_type->size) { 3000 btf_verifier_log_member(env, struct_type, member, 3001 "Member exceeds struct_size"); 3002 return -EINVAL; 3003 } 3004 3005 return 0; 3006 } 3007 3008 static s32 btf_struct_check_meta(struct btf_verifier_env *env, 3009 const struct btf_type *t, 3010 u32 meta_left) 3011 { 3012 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 3013 const struct btf_member *member; 3014 u32 meta_needed, last_offset; 3015 struct btf *btf = env->btf; 3016 u32 struct_size = t->size; 3017 u32 offset; 3018 u16 i; 3019 3020 meta_needed = btf_type_vlen(t) * sizeof(*member); 3021 if (meta_left < meta_needed) { 3022 btf_verifier_log_basic(env, t, 3023 "meta_left:%u meta_needed:%u", 3024 meta_left, meta_needed); 3025 return -EINVAL; 3026 } 3027 3028 /* struct type either no name or a valid one */ 3029 if (t->name_off && 3030 !btf_name_valid_identifier(env->btf, t->name_off)) { 3031 btf_verifier_log_type(env, t, "Invalid name"); 3032 return -EINVAL; 3033 } 3034 3035 btf_verifier_log_type(env, t, NULL); 3036 3037 last_offset = 0; 3038 for_each_member(i, t, member) { 3039 if (!btf_name_offset_valid(btf, member->name_off)) { 3040 btf_verifier_log_member(env, t, member, 3041 "Invalid member name_offset:%u", 3042 member->name_off); 3043 return -EINVAL; 3044 } 3045 3046 /* struct member either no name or a valid one */ 3047 if (member->name_off && 3048 !btf_name_valid_identifier(btf, member->name_off)) { 3049 btf_verifier_log_member(env, t, member, "Invalid name"); 3050 return -EINVAL; 3051 } 3052 /* A member cannot be in type void */ 3053 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { 3054 btf_verifier_log_member(env, t, member, 3055 "Invalid type_id"); 3056 return -EINVAL; 3057 } 3058 3059 offset = __btf_member_bit_offset(t, member); 3060 if (is_union && offset) { 3061 btf_verifier_log_member(env, t, member, 3062 "Invalid member bits_offset"); 3063 return -EINVAL; 3064 } 3065 3066 /* 3067 * ">" instead of ">=" because the last member could be 3068 * "char a[0];" 3069 */ 3070 if (last_offset > offset) { 3071 btf_verifier_log_member(env, t, member, 3072 "Invalid member bits_offset"); 3073 return -EINVAL; 3074 } 3075 3076 if (BITS_ROUNDUP_BYTES(offset) > struct_size) { 3077 btf_verifier_log_member(env, t, member, 3078 "Member bits_offset exceeds its struct size"); 3079 return -EINVAL; 3080 } 3081 3082 btf_verifier_log_member(env, t, member, NULL); 3083 last_offset = offset; 3084 } 3085 3086 return meta_needed; 3087 } 3088 3089 static int btf_struct_resolve(struct btf_verifier_env *env, 3090 const struct 
resolve_vertex *v) 3091 { 3092 const struct btf_member *member; 3093 int err; 3094 u16 i; 3095 3096 /* Before continue resolving the next_member, 3097 * ensure the last member is indeed resolved to a 3098 * type with size info. 3099 */ 3100 if (v->next_member) { 3101 const struct btf_type *last_member_type; 3102 const struct btf_member *last_member; 3103 u16 last_member_type_id; 3104 3105 last_member = btf_type_member(v->t) + v->next_member - 1; 3106 last_member_type_id = last_member->type; 3107 if (WARN_ON_ONCE(!env_type_is_resolved(env, 3108 last_member_type_id))) 3109 return -EINVAL; 3110 3111 last_member_type = btf_type_by_id(env->btf, 3112 last_member_type_id); 3113 if (btf_type_kflag(v->t)) 3114 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t, 3115 last_member, 3116 last_member_type); 3117 else 3118 err = btf_type_ops(last_member_type)->check_member(env, v->t, 3119 last_member, 3120 last_member_type); 3121 if (err) 3122 return err; 3123 } 3124 3125 for_each_member_from(i, v->next_member, v->t, member) { 3126 u32 member_type_id = member->type; 3127 const struct btf_type *member_type = btf_type_by_id(env->btf, 3128 member_type_id); 3129 3130 if (btf_type_nosize_or_null(member_type) || 3131 btf_type_is_resolve_source_only(member_type)) { 3132 btf_verifier_log_member(env, v->t, member, 3133 "Invalid member"); 3134 return -EINVAL; 3135 } 3136 3137 if (!env_type_is_resolve_sink(env, member_type) && 3138 !env_type_is_resolved(env, member_type_id)) { 3139 env_stack_set_next_member(env, i + 1); 3140 return env_stack_push(env, member_type, member_type_id); 3141 } 3142 3143 if (btf_type_kflag(v->t)) 3144 err = btf_type_ops(member_type)->check_kflag_member(env, v->t, 3145 member, 3146 member_type); 3147 else 3148 err = btf_type_ops(member_type)->check_member(env, v->t, 3149 member, 3150 member_type); 3151 if (err) 3152 return err; 3153 } 3154 3155 env_stack_pop_resolved(env, 0, 0); 3156 3157 return 0; 3158 } 3159 3160 static void btf_struct_log(struct btf_verifier_env *env, 3161 const struct btf_type *t) 3162 { 3163 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3164 } 3165 3166 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t, 3167 const char *name, int sz, int align) 3168 { 3169 const struct btf_member *member; 3170 u32 i, off = -ENOENT; 3171 3172 for_each_member(i, t, member) { 3173 const struct btf_type *member_type = btf_type_by_id(btf, 3174 member->type); 3175 if (!__btf_type_is_struct(member_type)) 3176 continue; 3177 if (member_type->size != sz) 3178 continue; 3179 if (strcmp(__btf_name_by_offset(btf, member_type->name_off), name)) 3180 continue; 3181 if (off != -ENOENT) 3182 /* only one such field is allowed */ 3183 return -E2BIG; 3184 off = __btf_member_bit_offset(t, member); 3185 if (off % 8) 3186 /* valid C code cannot generate such BTF */ 3187 return -EINVAL; 3188 off /= 8; 3189 if (off % align) 3190 return -EINVAL; 3191 } 3192 return off; 3193 } 3194 3195 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, 3196 const char *name, int sz, int align) 3197 { 3198 const struct btf_var_secinfo *vsi; 3199 u32 i, off = -ENOENT; 3200 3201 for_each_vsi(i, t, vsi) { 3202 const struct btf_type *var = btf_type_by_id(btf, vsi->type); 3203 const struct btf_type *var_type = btf_type_by_id(btf, var->type); 3204 3205 if (!__btf_type_is_struct(var_type)) 3206 continue; 3207 if (var_type->size != sz) 3208 continue; 3209 if (vsi->size != sz) 3210 continue; 3211 if (strcmp(__btf_name_by_offset(btf, 
var_type->name_off), name)) 3212 continue; 3213 if (off != -ENOENT) 3214 /* only one such field is allowed */ 3215 return -E2BIG; 3216 off = vsi->offset; 3217 if (off % align) 3218 return -EINVAL; 3219 } 3220 return off; 3221 } 3222 3223 static int btf_find_field(const struct btf *btf, const struct btf_type *t, 3224 const char *name, int sz, int align) 3225 { 3226 3227 if (__btf_type_is_struct(t)) 3228 return btf_find_struct_field(btf, t, name, sz, align); 3229 else if (btf_type_is_datasec(t)) 3230 return btf_find_datasec_var(btf, t, name, sz, align); 3231 return -EINVAL; 3232 } 3233 3234 /* find 'struct bpf_spin_lock' in map value. 3235 * return >= 0 offset if found 3236 * and < 0 in case of error 3237 */ 3238 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) 3239 { 3240 return btf_find_field(btf, t, "bpf_spin_lock", 3241 sizeof(struct bpf_spin_lock), 3242 __alignof__(struct bpf_spin_lock)); 3243 } 3244 3245 int btf_find_timer(const struct btf *btf, const struct btf_type *t) 3246 { 3247 return btf_find_field(btf, t, "bpf_timer", 3248 sizeof(struct bpf_timer), 3249 __alignof__(struct bpf_timer)); 3250 } 3251 3252 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, 3253 u32 type_id, void *data, u8 bits_offset, 3254 struct btf_show *show) 3255 { 3256 const struct btf_member *member; 3257 void *safe_data; 3258 u32 i; 3259 3260 safe_data = btf_show_start_struct_type(show, t, type_id, data); 3261 if (!safe_data) 3262 return; 3263 3264 for_each_member(i, t, member) { 3265 const struct btf_type *member_type = btf_type_by_id(btf, 3266 member->type); 3267 const struct btf_kind_operations *ops; 3268 u32 member_offset, bitfield_size; 3269 u32 bytes_offset; 3270 u8 bits8_offset; 3271 3272 btf_show_start_member(show, member); 3273 3274 member_offset = __btf_member_bit_offset(t, member); 3275 bitfield_size = __btf_member_bitfield_size(t, member); 3276 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 3277 bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 3278 if (bitfield_size) { 3279 safe_data = btf_show_start_type(show, member_type, 3280 member->type, 3281 data + bytes_offset); 3282 if (safe_data) 3283 btf_bitfield_show(safe_data, 3284 bits8_offset, 3285 bitfield_size, show); 3286 btf_show_end_type(show); 3287 } else { 3288 ops = btf_type_ops(member_type); 3289 ops->show(btf, member_type, member->type, 3290 data + bytes_offset, bits8_offset, show); 3291 } 3292 3293 btf_show_end_member(show); 3294 } 3295 3296 btf_show_end_struct_type(show); 3297 } 3298 3299 static void btf_struct_show(const struct btf *btf, const struct btf_type *t, 3300 u32 type_id, void *data, u8 bits_offset, 3301 struct btf_show *show) 3302 { 3303 const struct btf_member *m = show->state.member; 3304 3305 /* 3306 * First check if any members would be shown (are non-zero). 3307 * See comments above "struct btf_show" definition for more 3308 * details on how this works at a high-level. 
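 *
 * Illustrative walk-through: for
 *	struct s { int a; struct { int b; } inner; };
 * with a == 0 and inner.b == 1, the probe pass records that a
 * non-zero value exists below this depth, so the second
 * __btf_struct_show() call prints the struct while the zero "a"
 * is still elided inside it.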
3309 */ 3310 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 3311 if (!show->state.depth_check) { 3312 show->state.depth_check = show->state.depth + 1; 3313 show->state.depth_to_show = 0; 3314 } 3315 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3316 /* Restore saved member data here */ 3317 show->state.member = m; 3318 if (show->state.depth_check != show->state.depth + 1) 3319 return; 3320 show->state.depth_check = 0; 3321 3322 if (show->state.depth_to_show <= show->state.depth) 3323 return; 3324 /* 3325 * Reaching here indicates we have recursed and found 3326 * non-zero child values. 3327 */ 3328 } 3329 3330 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3331 } 3332 3333 static struct btf_kind_operations struct_ops = { 3334 .check_meta = btf_struct_check_meta, 3335 .resolve = btf_struct_resolve, 3336 .check_member = btf_struct_check_member, 3337 .check_kflag_member = btf_generic_check_kflag_member, 3338 .log_details = btf_struct_log, 3339 .show = btf_struct_show, 3340 }; 3341 3342 static int btf_enum_check_member(struct btf_verifier_env *env, 3343 const struct btf_type *struct_type, 3344 const struct btf_member *member, 3345 const struct btf_type *member_type) 3346 { 3347 u32 struct_bits_off = member->offset; 3348 u32 struct_size, bytes_offset; 3349 3350 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3351 btf_verifier_log_member(env, struct_type, member, 3352 "Member is not byte aligned"); 3353 return -EINVAL; 3354 } 3355 3356 struct_size = struct_type->size; 3357 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3358 if (struct_size - bytes_offset < member_type->size) { 3359 btf_verifier_log_member(env, struct_type, member, 3360 "Member exceeds struct_size"); 3361 return -EINVAL; 3362 } 3363 3364 return 0; 3365 } 3366 3367 static int btf_enum_check_kflag_member(struct btf_verifier_env *env, 3368 const struct btf_type *struct_type, 3369 const struct btf_member *member, 3370 const struct btf_type *member_type) 3371 { 3372 u32 struct_bits_off, nr_bits, bytes_end, struct_size; 3373 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; 3374 3375 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 3376 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 3377 if (!nr_bits) { 3378 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3379 btf_verifier_log_member(env, struct_type, member, 3380 "Member is not byte aligned"); 3381 return -EINVAL; 3382 } 3383 3384 nr_bits = int_bitsize; 3385 } else if (nr_bits > int_bitsize) { 3386 btf_verifier_log_member(env, struct_type, member, 3387 "Invalid member bitfield_size"); 3388 return -EINVAL; 3389 } 3390 3391 struct_size = struct_type->size; 3392 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); 3393 if (struct_size < bytes_end) { 3394 btf_verifier_log_member(env, struct_type, member, 3395 "Member exceeds struct_size"); 3396 return -EINVAL; 3397 } 3398 3399 return 0; 3400 } 3401 3402 static s32 btf_enum_check_meta(struct btf_verifier_env *env, 3403 const struct btf_type *t, 3404 u32 meta_left) 3405 { 3406 const struct btf_enum *enums = btf_type_enum(t); 3407 struct btf *btf = env->btf; 3408 u16 i, nr_enums; 3409 u32 meta_needed; 3410 3411 nr_enums = btf_type_vlen(t); 3412 meta_needed = nr_enums * sizeof(*enums); 3413 3414 if (meta_left < meta_needed) { 3415 btf_verifier_log_basic(env, t, 3416 "meta_left:%u meta_needed:%u", 3417 meta_left, meta_needed); 3418 return -EINVAL; 3419 } 3420 3421 if (btf_type_kflag(t)) { 3422 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3423 return -EINVAL; 3424 } 
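/* Enum sizes of 1, 2, 4 or 8 bytes pass the check below; e.g. a
 * packed enum may legitimately be emitted with t->size == 1.
 */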
3425 3426 if (t->size > 8 || !is_power_of_2(t->size)) { 3427 btf_verifier_log_type(env, t, "Unexpected size"); 3428 return -EINVAL; 3429 } 3430 3431 /* enum type either no name or a valid one */ 3432 if (t->name_off && 3433 !btf_name_valid_identifier(env->btf, t->name_off)) { 3434 btf_verifier_log_type(env, t, "Invalid name"); 3435 return -EINVAL; 3436 } 3437 3438 btf_verifier_log_type(env, t, NULL); 3439 3440 for (i = 0; i < nr_enums; i++) { 3441 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 3442 btf_verifier_log(env, "\tInvalid name_offset:%u", 3443 enums[i].name_off); 3444 return -EINVAL; 3445 } 3446 3447 /* enum member must have a valid name */ 3448 if (!enums[i].name_off || 3449 !btf_name_valid_identifier(btf, enums[i].name_off)) { 3450 btf_verifier_log_type(env, t, "Invalid name"); 3451 return -EINVAL; 3452 } 3453 3454 if (env->log.level == BPF_LOG_KERNEL) 3455 continue; 3456 btf_verifier_log(env, "\t%s val=%d\n", 3457 __btf_name_by_offset(btf, enums[i].name_off), 3458 enums[i].val); 3459 } 3460 3461 return meta_needed; 3462 } 3463 3464 static void btf_enum_log(struct btf_verifier_env *env, 3465 const struct btf_type *t) 3466 { 3467 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3468 } 3469 3470 static void btf_enum_show(const struct btf *btf, const struct btf_type *t, 3471 u32 type_id, void *data, u8 bits_offset, 3472 struct btf_show *show) 3473 { 3474 const struct btf_enum *enums = btf_type_enum(t); 3475 u32 i, nr_enums = btf_type_vlen(t); 3476 void *safe_data; 3477 int v; 3478 3479 safe_data = btf_show_start_type(show, t, type_id, data); 3480 if (!safe_data) 3481 return; 3482 3483 v = *(int *)safe_data; 3484 3485 for (i = 0; i < nr_enums; i++) { 3486 if (v != enums[i].val) 3487 continue; 3488 3489 btf_show_type_value(show, "%s", 3490 __btf_name_by_offset(btf, 3491 enums[i].name_off)); 3492 3493 btf_show_end_type(show); 3494 return; 3495 } 3496 3497 btf_show_type_value(show, "%d", v); 3498 btf_show_end_type(show); 3499 } 3500 3501 static struct btf_kind_operations enum_ops = { 3502 .check_meta = btf_enum_check_meta, 3503 .resolve = btf_df_resolve, 3504 .check_member = btf_enum_check_member, 3505 .check_kflag_member = btf_enum_check_kflag_member, 3506 .log_details = btf_enum_log, 3507 .show = btf_enum_show, 3508 }; 3509 3510 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, 3511 const struct btf_type *t, 3512 u32 meta_left) 3513 { 3514 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); 3515 3516 if (meta_left < meta_needed) { 3517 btf_verifier_log_basic(env, t, 3518 "meta_left:%u meta_needed:%u", 3519 meta_left, meta_needed); 3520 return -EINVAL; 3521 } 3522 3523 if (t->name_off) { 3524 btf_verifier_log_type(env, t, "Invalid name"); 3525 return -EINVAL; 3526 } 3527 3528 if (btf_type_kflag(t)) { 3529 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3530 return -EINVAL; 3531 } 3532 3533 btf_verifier_log_type(env, t, NULL); 3534 3535 return meta_needed; 3536 } 3537 3538 static void btf_func_proto_log(struct btf_verifier_env *env, 3539 const struct btf_type *t) 3540 { 3541 const struct btf_param *args = (const struct btf_param *)(t + 1); 3542 u16 nr_args = btf_type_vlen(t), i; 3543 3544 btf_verifier_log(env, "return=%u args=(", t->type); 3545 if (!nr_args) { 3546 btf_verifier_log(env, "void"); 3547 goto done; 3548 } 3549 3550 if (nr_args == 1 && !args[0].type) { 3551 /* Only one vararg */ 3552 btf_verifier_log(env, "vararg"); 3553 goto done; 3554 } 3555 3556 btf_verifier_log(env, "%u %s", args[0].type, 3557 
__btf_name_by_offset(env->btf, 3558 args[0].name_off)); 3559 for (i = 1; i < nr_args - 1; i++) 3560 btf_verifier_log(env, ", %u %s", args[i].type, 3561 __btf_name_by_offset(env->btf, 3562 args[i].name_off)); 3563 3564 if (nr_args > 1) { 3565 const struct btf_param *last_arg = &args[nr_args - 1]; 3566 3567 if (last_arg->type) 3568 btf_verifier_log(env, ", %u %s", last_arg->type, 3569 __btf_name_by_offset(env->btf, 3570 last_arg->name_off)); 3571 else 3572 btf_verifier_log(env, ", vararg"); 3573 } 3574 3575 done: 3576 btf_verifier_log(env, ")"); 3577 } 3578 3579 static struct btf_kind_operations func_proto_ops = { 3580 .check_meta = btf_func_proto_check_meta, 3581 .resolve = btf_df_resolve, 3582 /* 3583 * BTF_KIND_FUNC_PROTO cannot be directly referred by 3584 * a struct's member. 3585 * 3586 * It should be a function pointer instead. 3587 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) 3588 * 3589 * Hence, there is no btf_func_check_member(). 3590 */ 3591 .check_member = btf_df_check_member, 3592 .check_kflag_member = btf_df_check_kflag_member, 3593 .log_details = btf_func_proto_log, 3594 .show = btf_df_show, 3595 }; 3596 3597 static s32 btf_func_check_meta(struct btf_verifier_env *env, 3598 const struct btf_type *t, 3599 u32 meta_left) 3600 { 3601 if (!t->name_off || 3602 !btf_name_valid_identifier(env->btf, t->name_off)) { 3603 btf_verifier_log_type(env, t, "Invalid name"); 3604 return -EINVAL; 3605 } 3606 3607 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { 3608 btf_verifier_log_type(env, t, "Invalid func linkage"); 3609 return -EINVAL; 3610 } 3611 3612 if (btf_type_kflag(t)) { 3613 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3614 return -EINVAL; 3615 } 3616 3617 btf_verifier_log_type(env, t, NULL); 3618 3619 return 0; 3620 } 3621 3622 static int btf_func_resolve(struct btf_verifier_env *env, 3623 const struct resolve_vertex *v) 3624 { 3625 const struct btf_type *t = v->t; 3626 u32 next_type_id = t->type; 3627 int err; 3628 3629 err = btf_func_check(env, t); 3630 if (err) 3631 return err; 3632 3633 env_stack_pop_resolved(env, next_type_id, 0); 3634 return 0; 3635 } 3636 3637 static struct btf_kind_operations func_ops = { 3638 .check_meta = btf_func_check_meta, 3639 .resolve = btf_func_resolve, 3640 .check_member = btf_df_check_member, 3641 .check_kflag_member = btf_df_check_kflag_member, 3642 .log_details = btf_ref_type_log, 3643 .show = btf_df_show, 3644 }; 3645 3646 static s32 btf_var_check_meta(struct btf_verifier_env *env, 3647 const struct btf_type *t, 3648 u32 meta_left) 3649 { 3650 const struct btf_var *var; 3651 u32 meta_needed = sizeof(*var); 3652 3653 if (meta_left < meta_needed) { 3654 btf_verifier_log_basic(env, t, 3655 "meta_left:%u meta_needed:%u", 3656 meta_left, meta_needed); 3657 return -EINVAL; 3658 } 3659 3660 if (btf_type_vlen(t)) { 3661 btf_verifier_log_type(env, t, "vlen != 0"); 3662 return -EINVAL; 3663 } 3664 3665 if (btf_type_kflag(t)) { 3666 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3667 return -EINVAL; 3668 } 3669 3670 if (!t->name_off || 3671 !__btf_name_valid(env->btf, t->name_off, true)) { 3672 btf_verifier_log_type(env, t, "Invalid name"); 3673 return -EINVAL; 3674 } 3675 3676 /* A var cannot be in type void */ 3677 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { 3678 btf_verifier_log_type(env, t, "Invalid type_id"); 3679 return -EINVAL; 3680 } 3681 3682 var = btf_type_var(t); 3683 if (var->linkage != BTF_VAR_STATIC && 3684 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) { 3685 btf_verifier_log_type(env, t, "Linkage 
not supported"); 3686 return -EINVAL; 3687 } 3688 3689 btf_verifier_log_type(env, t, NULL); 3690 3691 return meta_needed; 3692 } 3693 3694 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) 3695 { 3696 const struct btf_var *var = btf_type_var(t); 3697 3698 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage); 3699 } 3700 3701 static const struct btf_kind_operations var_ops = { 3702 .check_meta = btf_var_check_meta, 3703 .resolve = btf_var_resolve, 3704 .check_member = btf_df_check_member, 3705 .check_kflag_member = btf_df_check_kflag_member, 3706 .log_details = btf_var_log, 3707 .show = btf_var_show, 3708 }; 3709 3710 static s32 btf_datasec_check_meta(struct btf_verifier_env *env, 3711 const struct btf_type *t, 3712 u32 meta_left) 3713 { 3714 const struct btf_var_secinfo *vsi; 3715 u64 last_vsi_end_off = 0, sum = 0; 3716 u32 i, meta_needed; 3717 3718 meta_needed = btf_type_vlen(t) * sizeof(*vsi); 3719 if (meta_left < meta_needed) { 3720 btf_verifier_log_basic(env, t, 3721 "meta_left:%u meta_needed:%u", 3722 meta_left, meta_needed); 3723 return -EINVAL; 3724 } 3725 3726 if (!t->size) { 3727 btf_verifier_log_type(env, t, "size == 0"); 3728 return -EINVAL; 3729 } 3730 3731 if (btf_type_kflag(t)) { 3732 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3733 return -EINVAL; 3734 } 3735 3736 if (!t->name_off || 3737 !btf_name_valid_section(env->btf, t->name_off)) { 3738 btf_verifier_log_type(env, t, "Invalid name"); 3739 return -EINVAL; 3740 } 3741 3742 btf_verifier_log_type(env, t, NULL); 3743 3744 for_each_vsi(i, t, vsi) { 3745 /* A var cannot be in type void */ 3746 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { 3747 btf_verifier_log_vsi(env, t, vsi, 3748 "Invalid type_id"); 3749 return -EINVAL; 3750 } 3751 3752 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) { 3753 btf_verifier_log_vsi(env, t, vsi, 3754 "Invalid offset"); 3755 return -EINVAL; 3756 } 3757 3758 if (!vsi->size || vsi->size > t->size) { 3759 btf_verifier_log_vsi(env, t, vsi, 3760 "Invalid size"); 3761 return -EINVAL; 3762 } 3763 3764 last_vsi_end_off = vsi->offset + vsi->size; 3765 if (last_vsi_end_off > t->size) { 3766 btf_verifier_log_vsi(env, t, vsi, 3767 "Invalid offset+size"); 3768 return -EINVAL; 3769 } 3770 3771 btf_verifier_log_vsi(env, t, vsi, NULL); 3772 sum += vsi->size; 3773 } 3774 3775 if (t->size < sum) { 3776 btf_verifier_log_type(env, t, "Invalid btf_info size"); 3777 return -EINVAL; 3778 } 3779 3780 return meta_needed; 3781 } 3782 3783 static int btf_datasec_resolve(struct btf_verifier_env *env, 3784 const struct resolve_vertex *v) 3785 { 3786 const struct btf_var_secinfo *vsi; 3787 struct btf *btf = env->btf; 3788 u16 i; 3789 3790 for_each_vsi_from(i, v->next_member, v->t, vsi) { 3791 u32 var_type_id = vsi->type, type_id, type_size = 0; 3792 const struct btf_type *var_type = btf_type_by_id(env->btf, 3793 var_type_id); 3794 if (!var_type || !btf_type_is_var(var_type)) { 3795 btf_verifier_log_vsi(env, v->t, vsi, 3796 "Not a VAR kind member"); 3797 return -EINVAL; 3798 } 3799 3800 if (!env_type_is_resolve_sink(env, var_type) && 3801 !env_type_is_resolved(env, var_type_id)) { 3802 env_stack_set_next_member(env, i + 1); 3803 return env_stack_push(env, var_type, var_type_id); 3804 } 3805 3806 type_id = var_type->type; 3807 if (!btf_type_id_size(btf, &type_id, &type_size)) { 3808 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type"); 3809 return -EINVAL; 3810 } 3811 3812 if (vsi->size < type_size) { 3813 btf_verifier_log_vsi(env, v->t, vsi, 
"Invalid size"); 3814 return -EINVAL; 3815 } 3816 } 3817 3818 env_stack_pop_resolved(env, 0, 0); 3819 return 0; 3820 } 3821 3822 static void btf_datasec_log(struct btf_verifier_env *env, 3823 const struct btf_type *t) 3824 { 3825 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3826 } 3827 3828 static void btf_datasec_show(const struct btf *btf, 3829 const struct btf_type *t, u32 type_id, 3830 void *data, u8 bits_offset, 3831 struct btf_show *show) 3832 { 3833 const struct btf_var_secinfo *vsi; 3834 const struct btf_type *var; 3835 u32 i; 3836 3837 if (!btf_show_start_type(show, t, type_id, data)) 3838 return; 3839 3840 btf_show_type_value(show, "section (\"%s\") = {", 3841 __btf_name_by_offset(btf, t->name_off)); 3842 for_each_vsi(i, t, vsi) { 3843 var = btf_type_by_id(btf, vsi->type); 3844 if (i) 3845 btf_show(show, ","); 3846 btf_type_ops(var)->show(btf, var, vsi->type, 3847 data + vsi->offset, bits_offset, show); 3848 } 3849 btf_show_end_type(show); 3850 } 3851 3852 static const struct btf_kind_operations datasec_ops = { 3853 .check_meta = btf_datasec_check_meta, 3854 .resolve = btf_datasec_resolve, 3855 .check_member = btf_df_check_member, 3856 .check_kflag_member = btf_df_check_kflag_member, 3857 .log_details = btf_datasec_log, 3858 .show = btf_datasec_show, 3859 }; 3860 3861 static s32 btf_float_check_meta(struct btf_verifier_env *env, 3862 const struct btf_type *t, 3863 u32 meta_left) 3864 { 3865 if (btf_type_vlen(t)) { 3866 btf_verifier_log_type(env, t, "vlen != 0"); 3867 return -EINVAL; 3868 } 3869 3870 if (btf_type_kflag(t)) { 3871 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3872 return -EINVAL; 3873 } 3874 3875 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && 3876 t->size != 16) { 3877 btf_verifier_log_type(env, t, "Invalid type_size"); 3878 return -EINVAL; 3879 } 3880 3881 btf_verifier_log_type(env, t, NULL); 3882 3883 return 0; 3884 } 3885 3886 static int btf_float_check_member(struct btf_verifier_env *env, 3887 const struct btf_type *struct_type, 3888 const struct btf_member *member, 3889 const struct btf_type *member_type) 3890 { 3891 u64 start_offset_bytes; 3892 u64 end_offset_bytes; 3893 u64 misalign_bits; 3894 u64 align_bytes; 3895 u64 align_bits; 3896 3897 /* Different architectures have different alignment requirements, so 3898 * here we check only for the reasonable minimum. This way we ensure 3899 * that types after CO-RE can pass the kernel BTF verifier. 
3900 */ 3901 align_bytes = min_t(u64, sizeof(void *), member_type->size); 3902 align_bits = align_bytes * BITS_PER_BYTE; 3903 div64_u64_rem(member->offset, align_bits, &misalign_bits); 3904 if (misalign_bits) { 3905 btf_verifier_log_member(env, struct_type, member, 3906 "Member is not properly aligned"); 3907 return -EINVAL; 3908 } 3909 3910 start_offset_bytes = member->offset / BITS_PER_BYTE; 3911 end_offset_bytes = start_offset_bytes + member_type->size; 3912 if (end_offset_bytes > struct_type->size) { 3913 btf_verifier_log_member(env, struct_type, member, 3914 "Member exceeds struct_size"); 3915 return -EINVAL; 3916 } 3917 3918 return 0; 3919 } 3920 3921 static void btf_float_log(struct btf_verifier_env *env, 3922 const struct btf_type *t) 3923 { 3924 btf_verifier_log(env, "size=%u", t->size); 3925 } 3926 3927 static const struct btf_kind_operations float_ops = { 3928 .check_meta = btf_float_check_meta, 3929 .resolve = btf_df_resolve, 3930 .check_member = btf_float_check_member, 3931 .check_kflag_member = btf_generic_check_kflag_member, 3932 .log_details = btf_float_log, 3933 .show = btf_df_show, 3934 }; 3935 3936 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, 3937 const struct btf_type *t, 3938 u32 meta_left) 3939 { 3940 const struct btf_decl_tag *tag; 3941 u32 meta_needed = sizeof(*tag); 3942 s32 component_idx; 3943 const char *value; 3944 3945 if (meta_left < meta_needed) { 3946 btf_verifier_log_basic(env, t, 3947 "meta_left:%u meta_needed:%u", 3948 meta_left, meta_needed); 3949 return -EINVAL; 3950 } 3951 3952 value = btf_name_by_offset(env->btf, t->name_off); 3953 if (!value || !value[0]) { 3954 btf_verifier_log_type(env, t, "Invalid value"); 3955 return -EINVAL; 3956 } 3957 3958 if (btf_type_vlen(t)) { 3959 btf_verifier_log_type(env, t, "vlen != 0"); 3960 return -EINVAL; 3961 } 3962 3963 if (btf_type_kflag(t)) { 3964 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3965 return -EINVAL; 3966 } 3967 3968 component_idx = btf_type_decl_tag(t)->component_idx; 3969 if (component_idx < -1) { 3970 btf_verifier_log_type(env, t, "Invalid component_idx"); 3971 return -EINVAL; 3972 } 3973 3974 btf_verifier_log_type(env, t, NULL); 3975 3976 return meta_needed; 3977 } 3978 3979 static int btf_decl_tag_resolve(struct btf_verifier_env *env, 3980 const struct resolve_vertex *v) 3981 { 3982 const struct btf_type *next_type; 3983 const struct btf_type *t = v->t; 3984 u32 next_type_id = t->type; 3985 struct btf *btf = env->btf; 3986 s32 component_idx; 3987 u32 vlen; 3988 3989 next_type = btf_type_by_id(btf, next_type_id); 3990 if (!next_type || !btf_type_is_decl_tag_target(next_type)) { 3991 btf_verifier_log_type(env, v->t, "Invalid type_id"); 3992 return -EINVAL; 3993 } 3994 3995 if (!env_type_is_resolve_sink(env, next_type) && 3996 !env_type_is_resolved(env, next_type_id)) 3997 return env_stack_push(env, next_type, next_type_id); 3998 3999 component_idx = btf_type_decl_tag(t)->component_idx; 4000 if (component_idx != -1) { 4001 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { 4002 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4003 return -EINVAL; 4004 } 4005 4006 if (btf_type_is_struct(next_type)) { 4007 vlen = btf_type_vlen(next_type); 4008 } else { 4009 /* next_type should be a function */ 4010 next_type = btf_type_by_id(btf, next_type->type); 4011 vlen = btf_type_vlen(next_type); 4012 } 4013 4014 if ((u32)component_idx >= vlen) { 4015 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4016 return -EINVAL; 4017 } 4018 } 4019 
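/* Illustrative reading of component_idx (example made up): a
 * decl_tag with component_idx == 1 on
 *	struct s { int a; int b; };
 * annotates member "b"; on a FUNC it indexes the prototype's
 * parameters, and component_idx == -1 annotates the tagged type,
 * var or func itself.
 */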
4020 env_stack_pop_resolved(env, next_type_id, 0); 4021 4022 return 0; 4023 } 4024 4025 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) 4026 { 4027 btf_verifier_log(env, "type=%u component_idx=%d", t->type, 4028 btf_type_decl_tag(t)->component_idx); 4029 } 4030 4031 static const struct btf_kind_operations decl_tag_ops = { 4032 .check_meta = btf_decl_tag_check_meta, 4033 .resolve = btf_decl_tag_resolve, 4034 .check_member = btf_df_check_member, 4035 .check_kflag_member = btf_df_check_kflag_member, 4036 .log_details = btf_decl_tag_log, 4037 .show = btf_df_show, 4038 }; 4039 4040 static int btf_func_proto_check(struct btf_verifier_env *env, 4041 const struct btf_type *t) 4042 { 4043 const struct btf_type *ret_type; 4044 const struct btf_param *args; 4045 const struct btf *btf; 4046 u16 nr_args, i; 4047 int err; 4048 4049 btf = env->btf; 4050 args = (const struct btf_param *)(t + 1); 4051 nr_args = btf_type_vlen(t); 4052 4053 /* Check func return type which could be "void" (t->type == 0) */ 4054 if (t->type) { 4055 u32 ret_type_id = t->type; 4056 4057 ret_type = btf_type_by_id(btf, ret_type_id); 4058 if (!ret_type) { 4059 btf_verifier_log_type(env, t, "Invalid return type"); 4060 return -EINVAL; 4061 } 4062 4063 if (btf_type_needs_resolve(ret_type) && 4064 !env_type_is_resolved(env, ret_type_id)) { 4065 err = btf_resolve(env, ret_type, ret_type_id); 4066 if (err) 4067 return err; 4068 } 4069 4070 /* Ensure the return type is a type that has a size */ 4071 if (!btf_type_id_size(btf, &ret_type_id, NULL)) { 4072 btf_verifier_log_type(env, t, "Invalid return type"); 4073 return -EINVAL; 4074 } 4075 } 4076 4077 if (!nr_args) 4078 return 0; 4079 4080 /* Last func arg type_id could be 0 if it is a vararg */ 4081 if (!args[nr_args - 1].type) { 4082 if (args[nr_args - 1].name_off) { 4083 btf_verifier_log_type(env, t, "Invalid arg#%u", 4084 nr_args); 4085 return -EINVAL; 4086 } 4087 nr_args--; 4088 } 4089 4090 err = 0; 4091 for (i = 0; i < nr_args; i++) { 4092 const struct btf_type *arg_type; 4093 u32 arg_type_id; 4094 4095 arg_type_id = args[i].type; 4096 arg_type = btf_type_by_id(btf, arg_type_id); 4097 if (!arg_type) { 4098 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4099 err = -EINVAL; 4100 break; 4101 } 4102 4103 if (args[i].name_off && 4104 (!btf_name_offset_valid(btf, args[i].name_off) || 4105 !btf_name_valid_identifier(btf, args[i].name_off))) { 4106 btf_verifier_log_type(env, t, 4107 "Invalid arg#%u", i + 1); 4108 err = -EINVAL; 4109 break; 4110 } 4111 4112 if (btf_type_needs_resolve(arg_type) && 4113 !env_type_is_resolved(env, arg_type_id)) { 4114 err = btf_resolve(env, arg_type, arg_type_id); 4115 if (err) 4116 break; 4117 } 4118 4119 if (!btf_type_id_size(btf, &arg_type_id, NULL)) { 4120 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4121 err = -EINVAL; 4122 break; 4123 } 4124 } 4125 4126 return err; 4127 } 4128 4129 static int btf_func_check(struct btf_verifier_env *env, 4130 const struct btf_type *t) 4131 { 4132 const struct btf_type *proto_type; 4133 const struct btf_param *args; 4134 const struct btf *btf; 4135 u16 nr_args, i; 4136 4137 btf = env->btf; 4138 proto_type = btf_type_by_id(btf, t->type); 4139 4140 if (!proto_type || !btf_type_is_func_proto(proto_type)) { 4141 btf_verifier_log_type(env, t, "Invalid type_id"); 4142 return -EINVAL; 4143 } 4144 4145 args = (const struct btf_param *)(proto_type + 1); 4146 nr_args = btf_type_vlen(proto_type); 4147 for (i = 0; i < nr_args; i++) { 4148 if (!args[i].name_off && args[i].type) { 
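/* A BTF_KIND_FUNC must name every argument; an unnamed argument is
 * only allowed as the vararg sentinel (name_off == 0 and type == 0),
 * which passes this check and is validated in btf_func_proto_check().
 */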
4149 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4150 return -EINVAL; 4151 } 4152 } 4153 4154 return 0; 4155 } 4156 4157 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { 4158 [BTF_KIND_INT] = &int_ops, 4159 [BTF_KIND_PTR] = &ptr_ops, 4160 [BTF_KIND_ARRAY] = &array_ops, 4161 [BTF_KIND_STRUCT] = &struct_ops, 4162 [BTF_KIND_UNION] = &struct_ops, 4163 [BTF_KIND_ENUM] = &enum_ops, 4164 [BTF_KIND_FWD] = &fwd_ops, 4165 [BTF_KIND_TYPEDEF] = &modifier_ops, 4166 [BTF_KIND_VOLATILE] = &modifier_ops, 4167 [BTF_KIND_CONST] = &modifier_ops, 4168 [BTF_KIND_RESTRICT] = &modifier_ops, 4169 [BTF_KIND_FUNC] = &func_ops, 4170 [BTF_KIND_FUNC_PROTO] = &func_proto_ops, 4171 [BTF_KIND_VAR] = &var_ops, 4172 [BTF_KIND_DATASEC] = &datasec_ops, 4173 [BTF_KIND_FLOAT] = &float_ops, 4174 [BTF_KIND_DECL_TAG] = &decl_tag_ops, 4175 [BTF_KIND_TYPE_TAG] = &modifier_ops, 4176 }; 4177 4178 static s32 btf_check_meta(struct btf_verifier_env *env, 4179 const struct btf_type *t, 4180 u32 meta_left) 4181 { 4182 u32 saved_meta_left = meta_left; 4183 s32 var_meta_size; 4184 4185 if (meta_left < sizeof(*t)) { 4186 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu", 4187 env->log_type_id, meta_left, sizeof(*t)); 4188 return -EINVAL; 4189 } 4190 meta_left -= sizeof(*t); 4191 4192 if (t->info & ~BTF_INFO_MASK) { 4193 btf_verifier_log(env, "[%u] Invalid btf_info:%x", 4194 env->log_type_id, t->info); 4195 return -EINVAL; 4196 } 4197 4198 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || 4199 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { 4200 btf_verifier_log(env, "[%u] Invalid kind:%u", 4201 env->log_type_id, BTF_INFO_KIND(t->info)); 4202 return -EINVAL; 4203 } 4204 4205 if (!btf_name_offset_valid(env->btf, t->name_off)) { 4206 btf_verifier_log(env, "[%u] Invalid name_offset:%u", 4207 env->log_type_id, t->name_off); 4208 return -EINVAL; 4209 } 4210 4211 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); 4212 if (var_meta_size < 0) 4213 return var_meta_size; 4214 4215 meta_left -= var_meta_size; 4216 4217 return saved_meta_left - meta_left; 4218 } 4219 4220 static int btf_check_all_metas(struct btf_verifier_env *env) 4221 { 4222 struct btf *btf = env->btf; 4223 struct btf_header *hdr; 4224 void *cur, *end; 4225 4226 hdr = &btf->hdr; 4227 cur = btf->nohdr_data + hdr->type_off; 4228 end = cur + hdr->type_len; 4229 4230 env->log_type_id = btf->base_btf ? 
btf->start_id : 1; 4231 while (cur < end) { 4232 struct btf_type *t = cur; 4233 s32 meta_size; 4234 4235 meta_size = btf_check_meta(env, t, end - cur); 4236 if (meta_size < 0) 4237 return meta_size; 4238 4239 btf_add_type(env, t); 4240 cur += meta_size; 4241 env->log_type_id++; 4242 } 4243 4244 return 0; 4245 } 4246 4247 static bool btf_resolve_valid(struct btf_verifier_env *env, 4248 const struct btf_type *t, 4249 u32 type_id) 4250 { 4251 struct btf *btf = env->btf; 4252 4253 if (!env_type_is_resolved(env, type_id)) 4254 return false; 4255 4256 if (btf_type_is_struct(t) || btf_type_is_datasec(t)) 4257 return !btf_resolved_type_id(btf, type_id) && 4258 !btf_resolved_type_size(btf, type_id); 4259 4260 if (btf_type_is_decl_tag(t) || btf_type_is_func(t)) 4261 return btf_resolved_type_id(btf, type_id) && 4262 !btf_resolved_type_size(btf, type_id); 4263 4264 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || 4265 btf_type_is_var(t)) { 4266 t = btf_type_id_resolve(btf, &type_id); 4267 return t && 4268 !btf_type_is_modifier(t) && 4269 !btf_type_is_var(t) && 4270 !btf_type_is_datasec(t); 4271 } 4272 4273 if (btf_type_is_array(t)) { 4274 const struct btf_array *array = btf_type_array(t); 4275 const struct btf_type *elem_type; 4276 u32 elem_type_id = array->type; 4277 u32 elem_size; 4278 4279 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 4280 return elem_type && !btf_type_is_modifier(elem_type) && 4281 (array->nelems * elem_size == 4282 btf_resolved_type_size(btf, type_id)); 4283 } 4284 4285 return false; 4286 } 4287 4288 static int btf_resolve(struct btf_verifier_env *env, 4289 const struct btf_type *t, u32 type_id) 4290 { 4291 u32 save_log_type_id = env->log_type_id; 4292 const struct resolve_vertex *v; 4293 int err = 0; 4294 4295 env->resolve_mode = RESOLVE_TBD; 4296 env_stack_push(env, t, type_id); 4297 while (!err && (v = env_stack_peak(env))) { 4298 env->log_type_id = v->type_id; 4299 err = btf_type_ops(v->t)->resolve(env, v); 4300 } 4301 4302 env->log_type_id = type_id; 4303 if (err == -E2BIG) { 4304 btf_verifier_log_type(env, t, 4305 "Exceeded max resolving depth:%u", 4306 MAX_RESOLVE_DEPTH); 4307 } else if (err == -EEXIST) { 4308 btf_verifier_log_type(env, t, "Loop detected"); 4309 } 4310 4311 /* Final sanity check */ 4312 if (!err && !btf_resolve_valid(env, t, type_id)) { 4313 btf_verifier_log_type(env, t, "Invalid resolve state"); 4314 err = -EINVAL; 4315 } 4316 4317 env->log_type_id = save_log_type_id; 4318 return err; 4319 } 4320 4321 static int btf_check_all_types(struct btf_verifier_env *env) 4322 { 4323 struct btf *btf = env->btf; 4324 const struct btf_type *t; 4325 u32 type_id, i; 4326 int err; 4327 4328 err = env_resolve_init(env); 4329 if (err) 4330 return err; 4331 4332 env->phase++; 4333 for (i = btf->base_btf ? 
0 : 1; i < btf->nr_types; i++) { 4334 type_id = btf->start_id + i; 4335 t = btf_type_by_id(btf, type_id); 4336 4337 env->log_type_id = type_id; 4338 if (btf_type_needs_resolve(t) && 4339 !env_type_is_resolved(env, type_id)) { 4340 err = btf_resolve(env, t, type_id); 4341 if (err) 4342 return err; 4343 } 4344 4345 if (btf_type_is_func_proto(t)) { 4346 err = btf_func_proto_check(env, t); 4347 if (err) 4348 return err; 4349 } 4350 } 4351 4352 return 0; 4353 } 4354 4355 static int btf_parse_type_sec(struct btf_verifier_env *env) 4356 { 4357 const struct btf_header *hdr = &env->btf->hdr; 4358 int err; 4359 4360 /* Type section must align to 4 bytes */ 4361 if (hdr->type_off & (sizeof(u32) - 1)) { 4362 btf_verifier_log(env, "Unaligned type_off"); 4363 return -EINVAL; 4364 } 4365 4366 if (!env->btf->base_btf && !hdr->type_len) { 4367 btf_verifier_log(env, "No type found"); 4368 return -EINVAL; 4369 } 4370 4371 err = btf_check_all_metas(env); 4372 if (err) 4373 return err; 4374 4375 return btf_check_all_types(env); 4376 } 4377 4378 static int btf_parse_str_sec(struct btf_verifier_env *env) 4379 { 4380 const struct btf_header *hdr; 4381 struct btf *btf = env->btf; 4382 const char *start, *end; 4383 4384 hdr = &btf->hdr; 4385 start = btf->nohdr_data + hdr->str_off; 4386 end = start + hdr->str_len; 4387 4388 if (end != btf->data + btf->data_size) { 4389 btf_verifier_log(env, "String section is not at the end"); 4390 return -EINVAL; 4391 } 4392 4393 btf->strings = start; 4394 4395 if (btf->base_btf && !hdr->str_len) 4396 return 0; 4397 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) { 4398 btf_verifier_log(env, "Invalid string section"); 4399 return -EINVAL; 4400 } 4401 if (!btf->base_btf && start[0]) { 4402 btf_verifier_log(env, "Invalid string section"); 4403 return -EINVAL; 4404 } 4405 4406 return 0; 4407 } 4408 4409 static const size_t btf_sec_info_offset[] = { 4410 offsetof(struct btf_header, type_off), 4411 offsetof(struct btf_header, str_off), 4412 }; 4413 4414 static int btf_sec_info_cmp(const void *a, const void *b) 4415 { 4416 const struct btf_sec_info *x = a; 4417 const struct btf_sec_info *y = b; 4418 4419 return (int)(x->off - y->off) ? 
: (int)(x->len - y->len); 4420 } 4421 4422 static int btf_check_sec_info(struct btf_verifier_env *env, 4423 u32 btf_data_size) 4424 { 4425 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; 4426 u32 total, expected_total, i; 4427 const struct btf_header *hdr; 4428 const struct btf *btf; 4429 4430 btf = env->btf; 4431 hdr = &btf->hdr; 4432 4433 /* Populate the secs from hdr */ 4434 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) 4435 secs[i] = *(struct btf_sec_info *)((void *)hdr + 4436 btf_sec_info_offset[i]); 4437 4438 sort(secs, ARRAY_SIZE(btf_sec_info_offset), 4439 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL); 4440 4441 /* Check for gaps and overlap among sections */ 4442 total = 0; 4443 expected_total = btf_data_size - hdr->hdr_len; 4444 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { 4445 if (expected_total < secs[i].off) { 4446 btf_verifier_log(env, "Invalid section offset"); 4447 return -EINVAL; 4448 } 4449 if (total < secs[i].off) { 4450 /* gap */ 4451 btf_verifier_log(env, "Unsupported section found"); 4452 return -EINVAL; 4453 } 4454 if (total > secs[i].off) { 4455 btf_verifier_log(env, "Section overlap found"); 4456 return -EINVAL; 4457 } 4458 if (expected_total - total < secs[i].len) { 4459 btf_verifier_log(env, 4460 "Total section length too long"); 4461 return -EINVAL; 4462 } 4463 total += secs[i].len; 4464 } 4465 4466 /* There is data other than hdr and known sections */ 4467 if (expected_total != total) { 4468 btf_verifier_log(env, "Unsupported section found"); 4469 return -EINVAL; 4470 } 4471 4472 return 0; 4473 } 4474 4475 static int btf_parse_hdr(struct btf_verifier_env *env) 4476 { 4477 u32 hdr_len, hdr_copy, btf_data_size; 4478 const struct btf_header *hdr; 4479 struct btf *btf; 4480 int err; 4481 4482 btf = env->btf; 4483 btf_data_size = btf->data_size; 4484 4485 if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { 4486 btf_verifier_log(env, "hdr_len not found"); 4487 return -EINVAL; 4488 } 4489 4490 hdr = btf->data; 4491 hdr_len = hdr->hdr_len; 4492 if (btf_data_size < hdr_len) { 4493 btf_verifier_log(env, "btf_header not found"); 4494 return -EINVAL; 4495 } 4496 4497 /* Ensure the unsupported header fields are zero */ 4498 if (hdr_len > sizeof(btf->hdr)) { 4499 u8 *expected_zero = btf->data + sizeof(btf->hdr); 4500 u8 *end = btf->data + hdr_len; 4501 4502 for (; expected_zero < end; expected_zero++) { 4503 if (*expected_zero) { 4504 btf_verifier_log(env, "Unsupported btf_header"); 4505 return -E2BIG; 4506 } 4507 } 4508 } 4509 4510 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); 4511 memcpy(&btf->hdr, btf->data, hdr_copy); 4512 4513 hdr = &btf->hdr; 4514 4515 btf_verifier_log_hdr(env, btf_data_size); 4516 4517 if (hdr->magic != BTF_MAGIC) { 4518 btf_verifier_log(env, "Invalid magic"); 4519 return -EINVAL; 4520 } 4521 4522 if (hdr->version != BTF_VERSION) { 4523 btf_verifier_log(env, "Unsupported version"); 4524 return -ENOTSUPP; 4525 } 4526 4527 if (hdr->flags) { 4528 btf_verifier_log(env, "Unsupported flags"); 4529 return -ENOTSUPP; 4530 } 4531 4532 if (!btf->base_btf && btf_data_size == hdr->hdr_len) { 4533 btf_verifier_log(env, "No data"); 4534 return -EINVAL; 4535 } 4536 4537 err = btf_check_sec_info(env, btf_data_size); 4538 if (err) 4539 return err; 4540 4541 return 0; 4542 } 4543 4544 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, 4545 u32 log_level, char __user *log_ubuf, u32 log_size) 4546 { 4547 struct btf_verifier_env *env = NULL; 4548 struct bpf_verifier_log *log; 4549 struct btf *btf = NULL; 
4550 u8 *data; 4551 int err; 4552 4553 if (btf_data_size > BTF_MAX_SIZE) 4554 return ERR_PTR(-E2BIG); 4555 4556 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 4557 if (!env) 4558 return ERR_PTR(-ENOMEM); 4559 4560 log = &env->log; 4561 if (log_level || log_ubuf || log_size) { 4562 /* user requested verbose verifier output 4563 * and supplied buffer to store the verification trace 4564 */ 4565 log->level = log_level; 4566 log->ubuf = log_ubuf; 4567 log->len_total = log_size; 4568 4569 /* log attributes have to be sane */ 4570 if (!bpf_verifier_log_attr_valid(log)) { 4571 err = -EINVAL; 4572 goto errout; 4573 } 4574 } 4575 4576 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 4577 if (!btf) { 4578 err = -ENOMEM; 4579 goto errout; 4580 } 4581 env->btf = btf; 4582 4583 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); 4584 if (!data) { 4585 err = -ENOMEM; 4586 goto errout; 4587 } 4588 4589 btf->data = data; 4590 btf->data_size = btf_data_size; 4591 4592 if (copy_from_bpfptr(data, btf_data, btf_data_size)) { 4593 err = -EFAULT; 4594 goto errout; 4595 } 4596 4597 err = btf_parse_hdr(env); 4598 if (err) 4599 goto errout; 4600 4601 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 4602 4603 err = btf_parse_str_sec(env); 4604 if (err) 4605 goto errout; 4606 4607 err = btf_parse_type_sec(env); 4608 if (err) 4609 goto errout; 4610 4611 if (log->level && bpf_verifier_log_full(log)) { 4612 err = -ENOSPC; 4613 goto errout; 4614 } 4615 4616 btf_verifier_env_free(env); 4617 refcount_set(&btf->refcnt, 1); 4618 return btf; 4619 4620 errout: 4621 btf_verifier_env_free(env); 4622 if (btf) 4623 btf_free(btf); 4624 return ERR_PTR(err); 4625 } 4626 4627 extern char __weak __start_BTF[]; 4628 extern char __weak __stop_BTF[]; 4629 extern struct btf *btf_vmlinux; 4630 4631 #define BPF_MAP_TYPE(_id, _ops) 4632 #define BPF_LINK_TYPE(_id, _name) 4633 static union { 4634 struct bpf_ctx_convert { 4635 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 4636 prog_ctx_type _id##_prog; \ 4637 kern_ctx_type _id##_kern; 4638 #include <linux/bpf_types.h> 4639 #undef BPF_PROG_TYPE 4640 } *__t; 4641 /* 't' is written once under lock. Read many times. */ 4642 const struct btf_type *t; 4643 } bpf_ctx_convert; 4644 enum { 4645 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 4646 __ctx_convert##_id, 4647 #include <linux/bpf_types.h> 4648 #undef BPF_PROG_TYPE 4649 __ctx_convert_unused, /* to avoid empty enum in extreme .config */ 4650 }; 4651 static u8 bpf_ctx_convert_map[] = { 4652 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 4653 [_id] = __ctx_convert##_id, 4654 #include <linux/bpf_types.h> 4655 #undef BPF_PROG_TYPE 4656 0, /* avoid empty array */ 4657 }; 4658 #undef BPF_MAP_TYPE 4659 #undef BPF_LINK_TYPE 4660 4661 static const struct btf_member * 4662 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, 4663 const struct btf_type *t, enum bpf_prog_type prog_type, 4664 int arg) 4665 { 4666 const struct btf_type *conv_struct; 4667 const struct btf_type *ctx_struct; 4668 const struct btf_member *ctx_type; 4669 const char *tname, *ctx_tname; 4670 4671 conv_struct = bpf_ctx_convert.t; 4672 if (!conv_struct) { 4673 bpf_log(log, "btf_vmlinux is malformed\n"); 4674 return NULL; 4675 } 4676 t = btf_type_by_id(btf, t->type); 4677 while (btf_type_is_modifier(t)) 4678 t = btf_type_by_id(btf, t->type); 4679 if (!btf_type_is_struct(t)) { 4680 /* Only pointer to struct is supported for now. 
4681 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF 4682 * is not supported yet. 4683 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. 4684 */ 4685 return NULL; 4686 } 4687 tname = btf_name_by_offset(btf, t->name_off); 4688 if (!tname) { 4689 bpf_log(log, "arg#%d struct doesn't have a name\n", arg); 4690 return NULL; 4691 } 4692 /* prog_type is valid bpf program type. No need for bounds check. */ 4693 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2; 4694 /* ctx_struct is a pointer to prog_ctx_type in vmlinux. 4695 * Like 'struct __sk_buff' 4696 */ 4697 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); 4698 if (!ctx_struct) 4699 /* should not happen */ 4700 return NULL; 4701 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); 4702 if (!ctx_tname) { 4703 /* should not happen */ 4704 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n"); 4705 return NULL; 4706 } 4707 /* only compare that prog's ctx type name is the same as 4708 * kernel expects. No need to compare field by field. 4709 * It's ok for bpf prog to do: 4710 * struct __sk_buff {}; 4711 * int socket_filter_bpf_prog(struct __sk_buff *skb) 4712 * { // no fields of skb are ever used } 4713 */ 4714 if (strcmp(ctx_tname, tname)) 4715 return NULL; 4716 return ctx_type; 4717 } 4718 4719 static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = { 4720 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 4721 #define BPF_LINK_TYPE(_id, _name) 4722 #define BPF_MAP_TYPE(_id, _ops) \ 4723 [_id] = &_ops, 4724 #include <linux/bpf_types.h> 4725 #undef BPF_PROG_TYPE 4726 #undef BPF_LINK_TYPE 4727 #undef BPF_MAP_TYPE 4728 }; 4729 4730 static int btf_vmlinux_map_ids_init(const struct btf *btf, 4731 struct bpf_verifier_log *log) 4732 { 4733 const struct bpf_map_ops *ops; 4734 int i, btf_id; 4735 4736 for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) { 4737 ops = btf_vmlinux_map_ops[i]; 4738 if (!ops || (!ops->map_btf_name && !ops->map_btf_id)) 4739 continue; 4740 if (!ops->map_btf_name || !ops->map_btf_id) { 4741 bpf_log(log, "map type %d is misconfigured\n", i); 4742 return -EINVAL; 4743 } 4744 btf_id = btf_find_by_name_kind(btf, ops->map_btf_name, 4745 BTF_KIND_STRUCT); 4746 if (btf_id < 0) 4747 return btf_id; 4748 *ops->map_btf_id = btf_id; 4749 } 4750 4751 return 0; 4752 } 4753 4754 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, 4755 struct btf *btf, 4756 const struct btf_type *t, 4757 enum bpf_prog_type prog_type, 4758 int arg) 4759 { 4760 const struct btf_member *prog_ctx_type, *kern_ctx_type; 4761 4762 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg); 4763 if (!prog_ctx_type) 4764 return -ENOENT; 4765 kern_ctx_type = prog_ctx_type + 1; 4766 return kern_ctx_type->type; 4767 } 4768 4769 BTF_ID_LIST(bpf_ctx_convert_btf_id) 4770 BTF_ID(struct, bpf_ctx_convert) 4771 4772 struct btf *btf_parse_vmlinux(void) 4773 { 4774 struct btf_verifier_env *env = NULL; 4775 struct bpf_verifier_log *log; 4776 struct btf *btf = NULL; 4777 int err; 4778 4779 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 4780 if (!env) 4781 return ERR_PTR(-ENOMEM); 4782 4783 log = &env->log; 4784 log->level = BPF_LOG_KERNEL; 4785 4786 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 4787 if (!btf) { 4788 err = -ENOMEM; 4789 goto errout; 4790 } 4791 env->btf = btf; 4792 4793 btf->data = __start_BTF; 4794 btf->data_size = __stop_BTF - __start_BTF; 4795 btf->kernel_btf = true; 4796 snprintf(btf->name, sizeof(btf->name), "vmlinux"); 4797 4798 err = 
btf_parse_hdr(env); 4799 if (err) 4800 goto errout; 4801 4802 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 4803 4804 err = btf_parse_str_sec(env); 4805 if (err) 4806 goto errout; 4807 4808 err = btf_check_all_metas(env); 4809 if (err) 4810 goto errout; 4811 4812 /* btf_parse_vmlinux() runs under bpf_verifier_lock */ 4813 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); 4814 4815 /* find bpf map structs for map_ptr access checking */ 4816 err = btf_vmlinux_map_ids_init(btf, log); 4817 if (err < 0) 4818 goto errout; 4819 4820 bpf_struct_ops_init(btf, log); 4821 4822 refcount_set(&btf->refcnt, 1); 4823 4824 err = btf_alloc_id(btf); 4825 if (err) 4826 goto errout; 4827 4828 btf_verifier_env_free(env); 4829 return btf; 4830 4831 errout: 4832 btf_verifier_env_free(env); 4833 if (btf) { 4834 kvfree(btf->types); 4835 kfree(btf); 4836 } 4837 return ERR_PTR(err); 4838 } 4839 4840 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 4841 4842 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) 4843 { 4844 struct btf_verifier_env *env = NULL; 4845 struct bpf_verifier_log *log; 4846 struct btf *btf = NULL, *base_btf; 4847 int err; 4848 4849 base_btf = bpf_get_btf_vmlinux(); 4850 if (IS_ERR(base_btf)) 4851 return base_btf; 4852 if (!base_btf) 4853 return ERR_PTR(-EINVAL); 4854 4855 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 4856 if (!env) 4857 return ERR_PTR(-ENOMEM); 4858 4859 log = &env->log; 4860 log->level = BPF_LOG_KERNEL; 4861 4862 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 4863 if (!btf) { 4864 err = -ENOMEM; 4865 goto errout; 4866 } 4867 env->btf = btf; 4868 4869 btf->base_btf = base_btf; 4870 btf->start_id = base_btf->nr_types; 4871 btf->start_str_off = base_btf->hdr.str_len; 4872 btf->kernel_btf = true; 4873 snprintf(btf->name, sizeof(btf->name), "%s", module_name); 4874 4875 btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); 4876 if (!btf->data) { 4877 err = -ENOMEM; 4878 goto errout; 4879 } 4880 memcpy(btf->data, data, data_size); 4881 btf->data_size = data_size; 4882 4883 err = btf_parse_hdr(env); 4884 if (err) 4885 goto errout; 4886 4887 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 4888 4889 err = btf_parse_str_sec(env); 4890 if (err) 4891 goto errout; 4892 4893 err = btf_check_all_metas(env); 4894 if (err) 4895 goto errout; 4896 4897 btf_verifier_env_free(env); 4898 refcount_set(&btf->refcnt, 1); 4899 return btf; 4900 4901 errout: 4902 btf_verifier_env_free(env); 4903 if (btf) { 4904 kvfree(btf->data); 4905 kvfree(btf->types); 4906 kfree(btf); 4907 } 4908 return ERR_PTR(err); 4909 } 4910 4911 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ 4912 4913 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) 4914 { 4915 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 4916 4917 if (tgt_prog) 4918 return tgt_prog->aux->btf; 4919 else 4920 return prog->aux->attach_btf; 4921 } 4922 4923 static bool is_int_ptr(struct btf *btf, const struct btf_type *t) 4924 { 4925 /* t comes in already as a pointer */ 4926 t = btf_type_by_id(btf, t->type); 4927 4928 /* allow const */ 4929 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST) 4930 t = btf_type_by_id(btf, t->type); 4931 4932 return btf_type_is_int(t); 4933 } 4934 4935 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 4936 const struct bpf_prog *prog, 4937 struct bpf_insn_access_aux *info) 4938 { 4939 const struct btf_type *t = prog->aux->attach_func_proto; 4940 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 4941 struct btf *btf = 
bpf_prog_get_target_btf(prog); 4942 const char *tname = prog->aux->attach_func_name; 4943 struct bpf_verifier_log *log = info->log; 4944 const struct btf_param *args; 4945 const char *tag_value; 4946 u32 nr_args, arg; 4947 int i, ret; 4948 4949 if (off % 8) { 4950 bpf_log(log, "func '%s' offset %d is not multiple of 8\n", 4951 tname, off); 4952 return false; 4953 } 4954 arg = off / 8; 4955 args = (const struct btf_param *)(t + 1); 4956 /* if (t == NULL) Fall back to default BPF prog with 4957 * MAX_BPF_FUNC_REG_ARGS u64 arguments. 4958 */ 4959 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; 4960 if (prog->aux->attach_btf_trace) { 4961 /* skip first 'void *__data' argument in btf_trace_##name typedef */ 4962 args++; 4963 nr_args--; 4964 } 4965 4966 if (arg > nr_args) { 4967 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 4968 tname, arg + 1); 4969 return false; 4970 } 4971 4972 if (arg == nr_args) { 4973 switch (prog->expected_attach_type) { 4974 case BPF_LSM_MAC: 4975 case BPF_TRACE_FEXIT: 4976 /* When LSM programs are attached to void LSM hooks 4977 * they use FEXIT trampolines and when attached to 4978 * int LSM hooks, they use MODIFY_RETURN trampolines. 4979 * 4980 * While the LSM programs are BPF_MODIFY_RETURN-like 4981 * the check: 4982 * 4983 * if (ret_type != 'int') 4984 * return -EINVAL; 4985 * 4986 * is _not_ done here. This is still safe as LSM hooks 4987 * have only void and int return types. 4988 */ 4989 if (!t) 4990 return true; 4991 t = btf_type_by_id(btf, t->type); 4992 break; 4993 case BPF_MODIFY_RETURN: 4994 /* For now the BPF_MODIFY_RETURN can only be attached to 4995 * functions that return an int. 4996 */ 4997 if (!t) 4998 return false; 4999 5000 t = btf_type_skip_modifiers(btf, t->type, NULL); 5001 if (!btf_type_is_small_int(t)) { 5002 bpf_log(log, 5003 "ret type %s not allowed for fmod_ret\n", 5004 btf_kind_str[BTF_INFO_KIND(t->info)]); 5005 return false; 5006 } 5007 break; 5008 default: 5009 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5010 tname, arg + 1); 5011 return false; 5012 } 5013 } else { 5014 if (!t) 5015 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ 5016 return true; 5017 t = btf_type_by_id(btf, args[arg].type); 5018 } 5019 5020 /* skip modifiers */ 5021 while (btf_type_is_modifier(t)) 5022 t = btf_type_by_id(btf, t->type); 5023 if (btf_type_is_small_int(t) || btf_type_is_enum(t)) 5024 /* accessing a scalar */ 5025 return true; 5026 if (!btf_type_is_ptr(t)) { 5027 bpf_log(log, 5028 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", 5029 tname, arg, 5030 __btf_name_by_offset(btf, t->name_off), 5031 btf_kind_str[BTF_INFO_KIND(t->info)]); 5032 return false; 5033 } 5034 5035 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ 5036 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5037 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5038 u32 type, flag; 5039 5040 type = base_type(ctx_arg_info->reg_type); 5041 flag = type_flag(ctx_arg_info->reg_type); 5042 if (ctx_arg_info->offset == off && type == PTR_TO_BUF && 5043 (flag & PTR_MAYBE_NULL)) { 5044 info->reg_type = ctx_arg_info->reg_type; 5045 return true; 5046 } 5047 } 5048 5049 if (t->type == 0) 5050 /* This is a pointer to void. 5051 * It is the same as scalar from the verifier safety pov. 5052 * No further pointer walking is allowed. 
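* (The program can still read and pass around the pointer value
* itself; dereferencing it requires a helper such as
* bpf_probe_read_kernel().)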
5053 */ 5054 return true; 5055 5056 if (is_int_ptr(btf, t)) 5057 return true; 5058 5059 /* this is a pointer to another type */ 5060 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5061 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5062 5063 if (ctx_arg_info->offset == off) { 5064 if (!ctx_arg_info->btf_id) { 5065 bpf_log(log,"invalid btf_id for context argument offset %u\n", off); 5066 return false; 5067 } 5068 5069 info->reg_type = ctx_arg_info->reg_type; 5070 info->btf = btf_vmlinux; 5071 info->btf_id = ctx_arg_info->btf_id; 5072 return true; 5073 } 5074 } 5075 5076 info->reg_type = PTR_TO_BTF_ID; 5077 if (tgt_prog) { 5078 enum bpf_prog_type tgt_type; 5079 5080 if (tgt_prog->type == BPF_PROG_TYPE_EXT) 5081 tgt_type = tgt_prog->aux->saved_dst_prog_type; 5082 else 5083 tgt_type = tgt_prog->type; 5084 5085 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg); 5086 if (ret > 0) { 5087 info->btf = btf_vmlinux; 5088 info->btf_id = ret; 5089 return true; 5090 } else { 5091 return false; 5092 } 5093 } 5094 5095 info->btf = btf; 5096 info->btf_id = t->type; 5097 t = btf_type_by_id(btf, t->type); 5098 5099 if (btf_type_is_type_tag(t)) { 5100 tag_value = __btf_name_by_offset(btf, t->name_off); 5101 if (strcmp(tag_value, "user") == 0) 5102 info->reg_type |= MEM_USER; 5103 if (strcmp(tag_value, "percpu") == 0) 5104 info->reg_type |= MEM_PERCPU; 5105 } 5106 5107 /* skip modifiers */ 5108 while (btf_type_is_modifier(t)) { 5109 info->btf_id = t->type; 5110 t = btf_type_by_id(btf, t->type); 5111 } 5112 if (!btf_type_is_struct(t)) { 5113 bpf_log(log, 5114 "func '%s' arg%d type %s is not a struct\n", 5115 tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]); 5116 return false; 5117 } 5118 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n", 5119 tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)], 5120 __btf_name_by_offset(btf, t->name_off)); 5121 return true; 5122 } 5123 5124 enum bpf_struct_walk_result { 5125 /* < 0 error */ 5126 WALK_SCALAR = 0, 5127 WALK_PTR, 5128 WALK_STRUCT, 5129 }; 5130 5131 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, 5132 const struct btf_type *t, int off, int size, 5133 u32 *next_btf_id, enum bpf_type_flag *flag) 5134 { 5135 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; 5136 const struct btf_type *mtype, *elem_type = NULL; 5137 const struct btf_member *member; 5138 const char *tname, *mname, *tag_value; 5139 u32 vlen, elem_id, mid; 5140 5141 again: 5142 tname = __btf_name_by_offset(btf, t->name_off); 5143 if (!btf_type_is_struct(t)) { 5144 bpf_log(log, "Type '%s' is not a struct\n", tname); 5145 return -EINVAL; 5146 } 5147 5148 vlen = btf_type_vlen(t); 5149 if (off + size > t->size) { 5150 /* If the last element is a variable size array, we may 5151 * need to relax the rule. 5152 */ 5153 struct btf_array *array_elem; 5154 5155 if (vlen == 0) 5156 goto error; 5157 5158 member = btf_type_member(t) + vlen - 1; 5159 mtype = btf_type_skip_modifiers(btf, member->type, 5160 NULL); 5161 if (!btf_type_is_array(mtype)) 5162 goto error; 5163 5164 array_elem = (struct btf_array *)(mtype + 1); 5165 if (array_elem->nelems != 0) 5166 goto error; 5167 5168 moff = __btf_member_bit_offset(t, member) / 8; 5169 if (off < moff) 5170 goto error; 5171 5172 /* Only allow structure for now, can be relaxed for 5173 * other types later. 
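* For illustration (struct names hypothetical):
*
* struct outer { int n; struct inner elems[]; };
*
* an access past sizeof(struct outer) is folded back into a single
* "struct inner" element by the modulo below:
* off = (off - moff) % t->size.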
5174 */ 5175 t = btf_type_skip_modifiers(btf, array_elem->type, 5176 NULL); 5177 if (!btf_type_is_struct(t)) 5178 goto error; 5179 5180 off = (off - moff) % t->size; 5181 goto again; 5182 5183 error: 5184 bpf_log(log, "access beyond struct %s at off %u size %u\n", 5185 tname, off, size); 5186 return -EACCES; 5187 } 5188 5189 for_each_member(i, t, member) { 5190 /* offset of the field in bytes */ 5191 moff = __btf_member_bit_offset(t, member) / 8; 5192 if (off + size <= moff) 5193 /* won't find anything, field is already too far */ 5194 break; 5195 5196 if (__btf_member_bitfield_size(t, member)) { 5197 u32 end_bit = __btf_member_bit_offset(t, member) + 5198 __btf_member_bitfield_size(t, member); 5199 5200 /* off <= moff instead of off == moff because clang 5201 * does not generate a BTF member for anonymous 5202 * bitfield like the ":16" here: 5203 * struct { 5204 * int :16; 5205 * int x:8; 5206 * }; 5207 */ 5208 if (off <= moff && 5209 BITS_ROUNDUP_BYTES(end_bit) <= off + size) 5210 return WALK_SCALAR; 5211 5212 /* off may be accessing a following member 5213 * 5214 * or 5215 * 5216 * Doing partial access at either end of this 5217 * bitfield. Continue on this case also to 5218 * treat it as not accessing this bitfield 5219 * and eventually error out as field not 5220 * found to keep it simple. 5221 * It could be relaxed if there was a legit 5222 * partial access case later. 5223 */ 5224 continue; 5225 } 5226 5227 /* In case of "off" is pointing to holes of a struct */ 5228 if (off < moff) 5229 break; 5230 5231 /* type of the field */ 5232 mid = member->type; 5233 mtype = btf_type_by_id(btf, member->type); 5234 mname = __btf_name_by_offset(btf, member->name_off); 5235 5236 mtype = __btf_resolve_size(btf, mtype, &msize, 5237 &elem_type, &elem_id, &total_nelems, 5238 &mid); 5239 if (IS_ERR(mtype)) { 5240 bpf_log(log, "field %s doesn't have size\n", mname); 5241 return -EFAULT; 5242 } 5243 5244 mtrue_end = moff + msize; 5245 if (off >= mtrue_end) 5246 /* no overlap with member, keep iterating */ 5247 continue; 5248 5249 if (btf_type_is_array(mtype)) { 5250 u32 elem_idx; 5251 5252 /* __btf_resolve_size() above helps to 5253 * linearize a multi-dimensional array. 5254 * 5255 * The logic here is treating an array 5256 * in a struct as the following way: 5257 * 5258 * struct outer { 5259 * struct inner array[2][2]; 5260 * }; 5261 * 5262 * looks like: 5263 * 5264 * struct outer { 5265 * struct inner array_elem0; 5266 * struct inner array_elem1; 5267 * struct inner array_elem2; 5268 * struct inner array_elem3; 5269 * }; 5270 * 5271 * When accessing outer->array[1][0], it moves 5272 * moff to "array_elem2", set mtype to 5273 * "struct inner", and msize also becomes 5274 * sizeof(struct inner). Then most of the 5275 * remaining logic will fall through without 5276 * caring the current member is an array or 5277 * not. 5278 * 5279 * Unlike mtype/msize/moff, mtrue_end does not 5280 * change. The naming difference ("_true") tells 5281 * that it is not always corresponding to 5282 * the current mtype/msize/moff. 5283 * It is the true end of the current 5284 * member (i.e. array in this case). That 5285 * will allow an int array to be accessed like 5286 * a scratch space, 5287 * i.e. allow access beyond the size of 5288 * the array's element as long as it is 5289 * within the mtrue_end boundary. 
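* E.g. with a member like "char cb[48]" (cf. skb->cb[]), an 8-byte
* load at offset 4 into cb[] spans cb[4..11] but stays below
* mtrue_end, so it is accepted and returned as WALK_SCALAR.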
5290 */
5291
5292 /* skip empty array */
5293 if (moff == mtrue_end)
5294 continue;
5295
5296 msize /= total_nelems;
5297 elem_idx = (off - moff) / msize;
5298 moff += elem_idx * msize;
5299 mtype = elem_type;
5300 mid = elem_id;
5301 }
5302
5303 /* the 'off' we're looking for is either equal to the start
5304 * of this field or lies inside this struct
5305 */
5306 if (btf_type_is_struct(mtype)) {
5307 /* our field must be inside that union or struct */
5308 t = mtype;
5309
5310 /* return if the offset matches the member offset */
5311 if (off == moff) {
5312 *next_btf_id = mid;
5313 return WALK_STRUCT;
5314 }
5315
5316 /* adjust offset we're looking for */
5317 off -= moff;
5318 goto again;
5319 }
5320
5321 if (btf_type_is_ptr(mtype)) {
5322 const struct btf_type *stype, *t;
5323 enum bpf_type_flag tmp_flag = 0;
5324 u32 id;
5325
5326 if (msize != size || off != moff) {
5327 bpf_log(log,
5328 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
5329 mname, moff, tname, off, size);
5330 return -EACCES;
5331 }
5332
5333 /* check type tag */
5334 t = btf_type_by_id(btf, mtype->type);
5335 if (btf_type_is_type_tag(t)) {
5336 tag_value = __btf_name_by_offset(btf, t->name_off);
5337 /* check __user tag */
5338 if (strcmp(tag_value, "user") == 0)
5339 tmp_flag = MEM_USER;
5340 /* check __percpu tag */
5341 if (strcmp(tag_value, "percpu") == 0)
5342 tmp_flag = MEM_PERCPU;
5343 }
5344
5345 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
5346 if (btf_type_is_struct(stype)) {
5347 *next_btf_id = id;
5348 *flag = tmp_flag;
5349 return WALK_PTR;
5350 }
5351 }
5352
5353 /* Allow more flexible access within an int as long as
5354 * it is within mtrue_end.
5355 * Since mtrue_end could be the end of an array,
5356 * that also allows using an array of int as a scratch
5357 * space. e.g. skb->cb[].
5358 */
5359 if (off + size > mtrue_end) {
5360 bpf_log(log,
5361 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
5362 mname, mtrue_end, tname, off, size);
5363 return -EACCES;
5364 }
5365
5366 return WALK_SCALAR;
5367 }
5368 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
5369 return -EINVAL;
5370 }
5371
5372 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
5373 const struct btf_type *t, int off, int size,
5374 enum bpf_access_type atype __maybe_unused,
5375 u32 *next_btf_id, enum bpf_type_flag *flag)
5376 {
5377 enum bpf_type_flag tmp_flag = 0;
5378 int err;
5379 u32 id;
5380
5381 do {
5382 err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
5383
5384 switch (err) {
5385 case WALK_PTR:
5386 /* If we found the pointer or scalar on t+off,
5387 * we're done.
5388 */
5389 *next_btf_id = id;
5390 *flag = tmp_flag;
5391 return PTR_TO_BTF_ID;
5392 case WALK_SCALAR:
5393 return SCALAR_VALUE;
5394 case WALK_STRUCT:
5395 /* We found a nested struct, so continue the search
5396 * by diving into it. At this point the offset is
5397 * aligned with the new type, so set it to 0.
5398 */
5399 t = btf_type_by_id(btf, id);
5400 off = 0;
5401 break;
5402 default:
5403 /* It's either an error or an unknown return value;
5404 * scream and leave.
5405 */
5406 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
5407 return -EINVAL;
5408 return err;
5409 }
5410 } while (t);
5411
5412 return -EINVAL;
5413 }
5414
5415 /* Check that two BTF types, each specified as a BTF object + id, are exactly
5416 * the same.
Trivial ID check is not enough due to module BTFs, because we can 5417 * end up with two different module BTFs, but IDs point to the common type in 5418 * vmlinux BTF. 5419 */ 5420 static bool btf_types_are_same(const struct btf *btf1, u32 id1, 5421 const struct btf *btf2, u32 id2) 5422 { 5423 if (id1 != id2) 5424 return false; 5425 if (btf1 == btf2) 5426 return true; 5427 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); 5428 } 5429 5430 bool btf_struct_ids_match(struct bpf_verifier_log *log, 5431 const struct btf *btf, u32 id, int off, 5432 const struct btf *need_btf, u32 need_type_id) 5433 { 5434 const struct btf_type *type; 5435 enum bpf_type_flag flag; 5436 int err; 5437 5438 /* Are we already done? */ 5439 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id)) 5440 return true; 5441 5442 again: 5443 type = btf_type_by_id(btf, id); 5444 if (!type) 5445 return false; 5446 err = btf_struct_walk(log, btf, type, off, 1, &id, &flag); 5447 if (err != WALK_STRUCT) 5448 return false; 5449 5450 /* We found nested struct object. If it matches 5451 * the requested ID, we're done. Otherwise let's 5452 * continue the search with offset 0 in the new 5453 * type. 5454 */ 5455 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) { 5456 off = 0; 5457 goto again; 5458 } 5459 5460 return true; 5461 } 5462 5463 static int __get_type_size(struct btf *btf, u32 btf_id, 5464 const struct btf_type **bad_type) 5465 { 5466 const struct btf_type *t; 5467 5468 if (!btf_id) 5469 /* void */ 5470 return 0; 5471 t = btf_type_by_id(btf, btf_id); 5472 while (t && btf_type_is_modifier(t)) 5473 t = btf_type_by_id(btf, t->type); 5474 if (!t) { 5475 *bad_type = btf_type_by_id(btf, 0); 5476 return -EINVAL; 5477 } 5478 if (btf_type_is_ptr(t)) 5479 /* kernel size of pointer. Not BPF's size of pointer*/ 5480 return sizeof(void *); 5481 if (btf_type_is_int(t) || btf_type_is_enum(t)) 5482 return t->size; 5483 *bad_type = t; 5484 return -EINVAL; 5485 } 5486 5487 int btf_distill_func_proto(struct bpf_verifier_log *log, 5488 struct btf *btf, 5489 const struct btf_type *func, 5490 const char *tname, 5491 struct btf_func_model *m) 5492 { 5493 const struct btf_param *args; 5494 const struct btf_type *t; 5495 u32 i, nargs; 5496 int ret; 5497 5498 if (!func) { 5499 /* BTF function prototype doesn't match the verifier types. 5500 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. 5501 */ 5502 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) 5503 m->arg_size[i] = 8; 5504 m->ret_size = 8; 5505 m->nr_args = MAX_BPF_FUNC_REG_ARGS; 5506 return 0; 5507 } 5508 args = (const struct btf_param *)(func + 1); 5509 nargs = btf_type_vlen(func); 5510 if (nargs > MAX_BPF_FUNC_ARGS) { 5511 bpf_log(log, 5512 "The function %s has %d arguments. 
Too many.\n", 5513 tname, nargs); 5514 return -EINVAL; 5515 } 5516 ret = __get_type_size(btf, func->type, &t); 5517 if (ret < 0) { 5518 bpf_log(log, 5519 "The function %s return type %s is unsupported.\n", 5520 tname, btf_kind_str[BTF_INFO_KIND(t->info)]); 5521 return -EINVAL; 5522 } 5523 m->ret_size = ret; 5524 5525 for (i = 0; i < nargs; i++) { 5526 if (i == nargs - 1 && args[i].type == 0) { 5527 bpf_log(log, 5528 "The function %s with variable args is unsupported.\n", 5529 tname); 5530 return -EINVAL; 5531 } 5532 ret = __get_type_size(btf, args[i].type, &t); 5533 if (ret < 0) { 5534 bpf_log(log, 5535 "The function %s arg%d type %s is unsupported.\n", 5536 tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]); 5537 return -EINVAL; 5538 } 5539 if (ret == 0) { 5540 bpf_log(log, 5541 "The function %s has malformed void argument.\n", 5542 tname); 5543 return -EINVAL; 5544 } 5545 m->arg_size[i] = ret; 5546 } 5547 m->nr_args = nargs; 5548 return 0; 5549 } 5550 5551 /* Compare BTFs of two functions assuming only scalars and pointers to context. 5552 * t1 points to BTF_KIND_FUNC in btf1 5553 * t2 points to BTF_KIND_FUNC in btf2 5554 * Returns: 5555 * EINVAL - function prototype mismatch 5556 * EFAULT - verifier bug 5557 * 0 - 99% match. The last 1% is validated by the verifier. 5558 */ 5559 static int btf_check_func_type_match(struct bpf_verifier_log *log, 5560 struct btf *btf1, const struct btf_type *t1, 5561 struct btf *btf2, const struct btf_type *t2) 5562 { 5563 const struct btf_param *args1, *args2; 5564 const char *fn1, *fn2, *s1, *s2; 5565 u32 nargs1, nargs2, i; 5566 5567 fn1 = btf_name_by_offset(btf1, t1->name_off); 5568 fn2 = btf_name_by_offset(btf2, t2->name_off); 5569 5570 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) { 5571 bpf_log(log, "%s() is not a global function\n", fn1); 5572 return -EINVAL; 5573 } 5574 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) { 5575 bpf_log(log, "%s() is not a global function\n", fn2); 5576 return -EINVAL; 5577 } 5578 5579 t1 = btf_type_by_id(btf1, t1->type); 5580 if (!t1 || !btf_type_is_func_proto(t1)) 5581 return -EFAULT; 5582 t2 = btf_type_by_id(btf2, t2->type); 5583 if (!t2 || !btf_type_is_func_proto(t2)) 5584 return -EFAULT; 5585 5586 args1 = (const struct btf_param *)(t1 + 1); 5587 nargs1 = btf_type_vlen(t1); 5588 args2 = (const struct btf_param *)(t2 + 1); 5589 nargs2 = btf_type_vlen(t2); 5590 5591 if (nargs1 != nargs2) { 5592 bpf_log(log, "%s() has %d args while %s() has %d args\n", 5593 fn1, nargs1, fn2, nargs2); 5594 return -EINVAL; 5595 } 5596 5597 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 5598 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 5599 if (t1->info != t2->info) { 5600 bpf_log(log, 5601 "Return type %s of %s() doesn't match type %s of %s()\n", 5602 btf_type_str(t1), fn1, 5603 btf_type_str(t2), fn2); 5604 return -EINVAL; 5605 } 5606 5607 for (i = 0; i < nargs1; i++) { 5608 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL); 5609 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL); 5610 5611 if (t1->info != t2->info) { 5612 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n", 5613 i, fn1, btf_type_str(t1), 5614 fn2, btf_type_str(t2)); 5615 return -EINVAL; 5616 } 5617 if (btf_type_has_size(t1) && t1->size != t2->size) { 5618 bpf_log(log, 5619 "arg%d in %s() has size %d while %s() has %d\n", 5620 i, fn1, t1->size, 5621 fn2, t2->size); 5622 return -EINVAL; 5623 } 5624 5625 /* global functions are validated with scalars and pointers 5626 * to context only. And only global functions can be replaced. 
5627 * Hence type check only those types. 5628 */ 5629 if (btf_type_is_int(t1) || btf_type_is_enum(t1)) 5630 continue; 5631 if (!btf_type_is_ptr(t1)) { 5632 bpf_log(log, 5633 "arg%d in %s() has unrecognized type\n", 5634 i, fn1); 5635 return -EINVAL; 5636 } 5637 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 5638 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 5639 if (!btf_type_is_struct(t1)) { 5640 bpf_log(log, 5641 "arg%d in %s() is not a pointer to context\n", 5642 i, fn1); 5643 return -EINVAL; 5644 } 5645 if (!btf_type_is_struct(t2)) { 5646 bpf_log(log, 5647 "arg%d in %s() is not a pointer to context\n", 5648 i, fn2); 5649 return -EINVAL; 5650 } 5651 /* This is an optional check to make program writing easier. 5652 * Compare names of structs and report an error to the user. 5653 * btf_prepare_func_args() already checked that t2 struct 5654 * is a context type. btf_prepare_func_args() will check 5655 * later that t1 struct is a context type as well. 5656 */ 5657 s1 = btf_name_by_offset(btf1, t1->name_off); 5658 s2 = btf_name_by_offset(btf2, t2->name_off); 5659 if (strcmp(s1, s2)) { 5660 bpf_log(log, 5661 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", 5662 i, fn1, s1, fn2, s2); 5663 return -EINVAL; 5664 } 5665 } 5666 return 0; 5667 } 5668 5669 /* Compare BTFs of given program with BTF of target program */ 5670 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 5671 struct btf *btf2, const struct btf_type *t2) 5672 { 5673 struct btf *btf1 = prog->aux->btf; 5674 const struct btf_type *t1; 5675 u32 btf_id = 0; 5676 5677 if (!prog->aux->func_info) { 5678 bpf_log(log, "Program extension requires BTF\n"); 5679 return -EINVAL; 5680 } 5681 5682 btf_id = prog->aux->func_info[0].type_id; 5683 if (!btf_id) 5684 return -EFAULT; 5685 5686 t1 = btf_type_by_id(btf1, btf_id); 5687 if (!t1 || !btf_type_is_func(t1)) 5688 return -EFAULT; 5689 5690 return btf_check_func_type_match(log, btf1, t1, btf2, t2); 5691 } 5692 5693 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 5694 #ifdef CONFIG_NET 5695 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 5696 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5697 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 5698 #endif 5699 }; 5700 5701 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ 5702 static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log, 5703 const struct btf *btf, 5704 const struct btf_type *t, int rec) 5705 { 5706 const struct btf_type *member_type; 5707 const struct btf_member *member; 5708 u32 i; 5709 5710 if (!btf_type_is_struct(t)) 5711 return false; 5712 5713 for_each_member(i, t, member) { 5714 const struct btf_array *array; 5715 5716 member_type = btf_type_skip_modifiers(btf, member->type, NULL); 5717 if (btf_type_is_struct(member_type)) { 5718 if (rec >= 3) { 5719 bpf_log(log, "max struct nesting depth exceeded\n"); 5720 return false; 5721 } 5722 if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1)) 5723 return false; 5724 continue; 5725 } 5726 if (btf_type_is_array(member_type)) { 5727 array = btf_type_array(member_type); 5728 if (!array->nelems) 5729 return false; 5730 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 5731 if (!btf_type_is_scalar(member_type)) 5732 return false; 5733 continue; 5734 } 5735 if (!btf_type_is_scalar(member_type)) 5736 return false; 5737 } 5738 return true; 5739 } 5740 5741 static bool is_kfunc_arg_mem_size(const struct btf *btf, 5742 const struct btf_param *arg, 5743 
const struct bpf_reg_state *reg)
5744 {
5745 int len, sfx_len = sizeof("__sz") - 1;
5746 const struct btf_type *t;
5747 const char *param_name;
5748
5749 t = btf_type_skip_modifiers(btf, arg->type, NULL);
5750 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
5751 return false;
5752
5753 /* In the future, this can be ported to use BTF tagging */
5754 param_name = btf_name_by_offset(btf, arg->name_off);
5755 if (str_is_empty(param_name))
5756 return false;
5757 len = strlen(param_name);
5758 if (len < sfx_len)
5759 return false;
5760 param_name += len - sfx_len;
5761 if (strncmp(param_name, "__sz", sfx_len))
5762 return false;
5763
5764 return true;
5765 }
5766
5767 static int btf_check_func_arg_match(struct bpf_verifier_env *env,
5768 const struct btf *btf, u32 func_id,
5769 struct bpf_reg_state *regs,
5770 bool ptr_to_mem_ok)
5771 {
5772 struct bpf_verifier_log *log = &env->log;
5773 u32 i, nargs, ref_id, ref_obj_id = 0;
5774 bool is_kfunc = btf_is_kernel(btf);
5775 const char *func_name, *ref_tname;
5776 const struct btf_type *t, *ref_t;
5777 const struct btf_param *args;
5778 int ref_regno = 0, ret;
5779 bool rel = false;
5780
5781 t = btf_type_by_id(btf, func_id);
5782 if (!t || !btf_type_is_func(t)) {
5783 /* These checks were already done by the verifier while loading
5784 * struct bpf_func_info or in add_kfunc_call().
5785 */
5786 bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n",
5787 func_id);
5788 return -EFAULT;
5789 }
5790 func_name = btf_name_by_offset(btf, t->name_off);
5791
5792 t = btf_type_by_id(btf, t->type);
5793 if (!t || !btf_type_is_func_proto(t)) {
5794 bpf_log(log, "Invalid BTF of func %s\n", func_name);
5795 return -EFAULT;
5796 }
5797 args = (const struct btf_param *)(t + 1);
5798 nargs = btf_type_vlen(t);
5799 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
5800 bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs,
5801 MAX_BPF_FUNC_REG_ARGS);
5802 return -EINVAL;
5803 }
5804
5805 /* Only a kfunc can be a release func */
5806 if (is_kfunc)
5807 rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog),
5808 BTF_KFUNC_TYPE_RELEASE, func_id);
5809 /* check that BTF function arguments match actual types that the
5810 * verifier sees.
5811 */
5812 for (i = 0; i < nargs; i++) {
5813 u32 regno = i + 1;
5814 struct bpf_reg_state *reg = &regs[regno];
5815
5816 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
5817 if (btf_type_is_scalar(t)) {
5818 if (reg->type == SCALAR_VALUE)
5819 continue;
5820 bpf_log(log, "R%d is not a scalar\n", regno);
5821 return -EINVAL;
5822 }
5823
5824 if (!btf_type_is_ptr(t)) {
5825 bpf_log(log, "Unrecognized arg#%d type %s\n",
5826 i, btf_type_str(t));
5827 return -EINVAL;
5828 }
5829
5830 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
5831 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
5832
5833 ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE, rel);
5834 if (ret < 0)
5835 return ret;
5836
5837 if (btf_get_prog_ctx_type(log, btf, t,
5838 env->prog->type, i)) {
5839 /* If function expects ctx type in BTF check that caller
5840 * is passing PTR_TO_CTX.
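* E.g. (kfunc name hypothetical): in a BPF_PROG_TYPE_SCHED_CLS
* program, a kfunc declared as
*
* int kf(struct __sk_buff *skb);
*
* only accepts the program's context pointer for "skb".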
5841 */
5842 if (reg->type != PTR_TO_CTX) {
5843 bpf_log(log,
5844 "arg#%d expected pointer to ctx, but got %s\n",
5845 i, btf_type_str(t));
5846 return -EINVAL;
5847 }
5848 } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
5849 (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
5850 const struct btf_type *reg_ref_t;
5851 const struct btf *reg_btf;
5852 const char *reg_ref_tname;
5853 u32 reg_ref_id;
5854
5855 if (!btf_type_is_struct(ref_t)) {
5856 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
5857 func_name, i, btf_type_str(ref_t),
5858 ref_tname);
5859 return -EINVAL;
5860 }
5861
5862 if (reg->type == PTR_TO_BTF_ID) {
5863 reg_btf = reg->btf;
5864 reg_ref_id = reg->btf_id;
5865 /* Ensure only one argument is referenced
5866 * PTR_TO_BTF_ID, check_func_arg_reg_off relies
5867 * on only one referenced register being allowed
5868 * for kfuncs.
5869 */
5870 if (reg->ref_obj_id) {
5871 if (ref_obj_id) {
5872 bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5873 regno, reg->ref_obj_id, ref_obj_id);
5874 return -EFAULT;
5875 }
5876 ref_regno = regno;
5877 ref_obj_id = reg->ref_obj_id;
5878 }
5879 } else {
5880 reg_btf = btf_vmlinux;
5881 reg_ref_id = *reg2btf_ids[base_type(reg->type)];
5882 }
5883
5884 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
5885 &reg_ref_id);
5886 reg_ref_tname = btf_name_by_offset(reg_btf,
5887 reg_ref_t->name_off);
5888 if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
5889 reg->off, btf, ref_id)) {
5890 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
5891 func_name, i,
5892 btf_type_str(ref_t), ref_tname,
5893 regno, btf_type_str(reg_ref_t),
5894 reg_ref_tname);
5895 return -EINVAL;
5896 }
5897 } else if (ptr_to_mem_ok) {
5898 const struct btf_type *resolve_ret;
5899 u32 type_size;
5900
5901 if (is_kfunc) {
5902 bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]);
5903
5904 /* Permit pointer to mem, but only when argument
5905 * type is pointer to scalar, or struct composed
5906 * (recursively) of scalars.
5907 * When arg_mem_size is true, the pointer can be
5908 * void *.
5909 */
5910 if (!btf_type_is_scalar(ref_t) &&
5911 !__btf_type_is_scalar_struct(log, btf, ref_t, 0) &&
5912 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
5913 bpf_log(log,
5914 "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
5915 i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
5916 return -EINVAL;
5917 }
5918
5919 /* Check for mem, len pair */
5920 if (arg_mem_size) {
5921 if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) {
5922 bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n",
5923 i, i + 1);
5924 return -EINVAL;
5925 }
5926 i++;
5927 continue;
5928 }
5929 }
5930
5931 resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
5932 if (IS_ERR(resolve_ret)) {
5933 bpf_log(log,
5934 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
5935 i, btf_type_str(ref_t), ref_tname,
5936 PTR_ERR(resolve_ret));
5937 return -EINVAL;
5938 }
5939
5940 if (check_mem_reg(env, reg, regno, type_size))
5941 return -EINVAL;
5942 } else {
5943 bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
5944 is_kfunc ?
"kernel " : "", func_name, func_id); 5945 return -EINVAL; 5946 } 5947 } 5948 5949 /* Either both are set, or neither */ 5950 WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno)); 5951 /* We already made sure ref_obj_id is set only for one argument. We do 5952 * allow (!rel && ref_obj_id), so that passing such referenced 5953 * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when 5954 * is_kfunc is true. 5955 */ 5956 if (rel && !ref_obj_id) { 5957 bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", 5958 func_name); 5959 return -EINVAL; 5960 } 5961 /* returns argument register number > 0 in case of reference release kfunc */ 5962 return rel ? ref_regno : 0; 5963 } 5964 5965 /* Compare BTF of a function with given bpf_reg_state. 5966 * Returns: 5967 * EFAULT - there is a verifier bug. Abort verification. 5968 * EINVAL - there is a type mismatch or BTF is not available. 5969 * 0 - BTF matches with what bpf_reg_state expects. 5970 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. 5971 */ 5972 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, 5973 struct bpf_reg_state *regs) 5974 { 5975 struct bpf_prog *prog = env->prog; 5976 struct btf *btf = prog->aux->btf; 5977 bool is_global; 5978 u32 btf_id; 5979 int err; 5980 5981 if (!prog->aux->func_info) 5982 return -EINVAL; 5983 5984 btf_id = prog->aux->func_info[subprog].type_id; 5985 if (!btf_id) 5986 return -EFAULT; 5987 5988 if (prog->aux->func_info_aux[subprog].unreliable) 5989 return -EINVAL; 5990 5991 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 5992 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global); 5993 5994 /* Compiler optimizations can remove arguments from static functions 5995 * or mismatched type can be passed into a global function. 5996 * In such cases mark the function as unreliable from BTF point of view. 5997 */ 5998 if (err) 5999 prog->aux->func_info_aux[subprog].unreliable = true; 6000 return err; 6001 } 6002 6003 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 6004 const struct btf *btf, u32 func_id, 6005 struct bpf_reg_state *regs) 6006 { 6007 return btf_check_func_arg_match(env, btf, func_id, regs, true); 6008 } 6009 6010 /* Convert BTF of a function into bpf_reg_state if possible 6011 * Returns: 6012 * EFAULT - there is a verifier bug. Abort verification. 6013 * EINVAL - cannot convert BTF. 6014 * 0 - Successfully converted BTF into bpf_reg_state 6015 * (either PTR_TO_CTX or SCALAR_VALUE). 
 */
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *regs)
{
	struct bpf_verifier_log *log = &env->log;
	struct bpf_prog *prog = env->prog;
	enum bpf_prog_type prog_type = prog->type;
	struct btf *btf = prog->aux->btf;
	const struct btf_param *args;
	const struct btf_type *t, *ref_t;
	u32 i, nargs, btf_id;
	const char *tname;

	if (!prog->aux->func_info ||
	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
		bpf_log(log, "Verifier bug\n");
		return -EFAULT;
	}

	btf_id = prog->aux->func_info[subprog].type_id;
	if (!btf_id) {
		bpf_log(log, "Global functions need valid BTF\n");
		return -EFAULT;
	}

	t = btf_type_by_id(btf, btf_id);
	if (!t || !btf_type_is_func(t)) {
		/* These checks were already done by the verifier while loading
		 * struct bpf_func_info
		 */
		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
			subprog);
		return -EFAULT;
	}
	tname = btf_name_by_offset(btf, t->name_off);

	if (log->level & BPF_LOG_LEVEL)
		bpf_log(log, "Validating %s() func#%d...\n",
			tname, subprog);

	if (prog->aux->func_info_aux[subprog].unreliable) {
		bpf_log(log, "Verifier bug in function %s()\n", tname);
		return -EFAULT;
	}
	if (prog_type == BPF_PROG_TYPE_EXT)
		prog_type = prog->aux->dst_prog->type;

	t = btf_type_by_id(btf, t->type);
	if (!t || !btf_type_is_func_proto(t)) {
		bpf_log(log, "Invalid type of function %s()\n", tname);
		return -EFAULT;
	}
	args = (const struct btf_param *)(t + 1);
	nargs = btf_type_vlen(t);
	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
		bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
		return -EINVAL;
	}
	/* check that function returns int */
	t = btf_type_by_id(btf, t->type);
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
		bpf_log(log,
			"Global function %s() doesn't return scalar. Only those are supported.\n",
			tname);
		return -EINVAL;
	}
	/* Convert BTF function arguments into verifier types.
	 * Only PTR_TO_CTX and SCALAR are supported atm.
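	 * In practice the loop below also converts pointer args that do not
	 * match the context type into PTR_TO_MEM | PTR_MAYBE_NULL, with the
	 * pointee size recorded in reg->mem_size.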
	 */
	for (i = 0; i < nargs; i++) {
		struct bpf_reg_state *reg = &regs[i + 1];

		t = btf_type_by_id(btf, args[i].type);
		while (btf_type_is_modifier(t))
			t = btf_type_by_id(btf, t->type);
		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
			reg->type = SCALAR_VALUE;
			continue;
		}
		if (btf_type_is_ptr(t)) {
			if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
				reg->type = PTR_TO_CTX;
				continue;
			}

			t = btf_type_skip_modifiers(btf, t->type, NULL);

			ref_t = btf_resolve_size(btf, t, &reg->mem_size);
			if (IS_ERR(ref_t)) {
				bpf_log(log,
					"arg#%d reference type('%s %s') size cannot be determined: %ld\n",
					i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
					PTR_ERR(ref_t));
				return -EINVAL;
			}

			reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
			reg->id = ++env->id_gen;

			continue;
		}
		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
			i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
		return -EINVAL;
	}
	return 0;
}

static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
			  struct btf_show *show)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	show->btf = btf;
	memset(&show->state, 0, sizeof(show->state));
	memset(&show->obj, 0, sizeof(show->obj));

	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
}

static void btf_seq_show(struct btf_show *show, const char *fmt,
			 va_list args)
{
	seq_vprintf((struct seq_file *)show->target, fmt, args);
}

int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
			    void *obj, struct seq_file *m, u64 flags)
{
	struct btf_show sseq;

	sseq.target = m;
	sseq.showfn = btf_seq_show;
	sseq.flags = flags;

	btf_type_show(btf, type_id, obj, &sseq);

	return sseq.state.status;
}

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
}

struct btf_show_snprintf {
	struct btf_show show;
	int len_left;		/* space left in string */
	int len;		/* length we would have written */
};

static void btf_snprintf_show(struct btf_show *show, const char *fmt,
			      va_list args)
{
	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
	int len;

	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);

	if (len < 0) {
		ssnprintf->len_left = 0;
		ssnprintf->len = len;
	} else if (len > ssnprintf->len_left) {
		/* no space, drive on to get length we would have written */
		ssnprintf->len_left = 0;
		ssnprintf->len += len;
	} else {
		ssnprintf->len_left -= len;
		ssnprintf->len += len;
		show->target += len;
	}
}

int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
			   char *buf, int len, u64 flags)
{
	struct btf_show_snprintf ssnprintf;

	ssnprintf.show.target = buf;
	ssnprintf.show.flags = flags;
	ssnprintf.show.showfn = btf_snprintf_show;
	ssnprintf.len_left = len;
	ssnprintf.len = 0;

	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);

	/* If we encountered an error, return it.
	 */
	if (ssnprintf.show.state.status)
		return ssnprintf.show.state.status;

	/* Otherwise return the length we would have written */
	return ssnprintf.len;
}

#ifdef CONFIG_PROC_FS
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct btf *btf = filp->private_data;

	seq_printf(m, "btf_id:\t%u\n", btf->id);
}
#endif

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_btf_show_fdinfo,
#endif
	.release	= btf_release,
};

static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}

int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
			attr->btf_size, attr->btf_log_level,
			u64_to_user_ptr(attr->btf_log_buf),
			attr->btf_log_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is published to userspace.
	 * All BTF frees must go through call_rcu() from
	 * now on (i.e. free by calling btf_put()).
	 */

	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}

struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info;
	u32 info_copy, btf_copy;
	void __user *ubtf;
	char __user *uname;
	u32 uinfo_len, uname_len, name_len;
	int ret = 0;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	info.kernel_btf = btf->kernel_btf;

	uname = u64_to_user_ptr(info.name);
	uname_len = info.name_len;
	if (!uname ^ !uname_len)
		return -EINVAL;

	name_len = strlen(btf->name);
	info.name_len = name_len;

	if (uname) {
		if (uname_len >= name_len + 1) {
			if (copy_to_user(uname, btf->name, name_len + 1))
				return -EFAULT;
		} else {
			char zero = '\0';

			if (copy_to_user(uname, btf->name, uname_len - 1))
				return -EFAULT;
			if (put_user(zero, uname + uname_len - 1))
				return -EFAULT;
			/* let user space know the buffer was too short */
			ret = -ENOSPC;
		}
	}

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;
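
	/* Note that even when -ENOSPC is returned above, the truncated name
	 * and the full info (including the real name_len) were still copied
	 * out, so user space can retry with a larger name buffer.
	 */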
	return ret;
}

int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}

u32 btf_obj_id(const struct btf *btf)
{
	return btf->id;
}

bool btf_is_kernel(const struct btf *btf)
{
	return btf->kernel_btf;
}

bool btf_is_module(const struct btf *btf)
{
	return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
}

static int btf_id_cmp_func(const void *a, const void *b)
{
	const int *pa = a, *pb = b;

	return *pa - *pb;
}

bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
{
	return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
}

enum {
	BTF_MODULE_F_LIVE = (1 << 0),
};

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
struct btf_module {
	struct list_head list;
	struct module *module;
	struct btf *btf;
	struct bin_attribute *sysfs_attr;
	int flags;
};

static LIST_HEAD(btf_modules);
static DEFINE_MUTEX(btf_module_mutex);

static ssize_t
btf_module_read(struct file *file, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t off, size_t len)
{
	const struct btf *btf = bin_attr->private;

	memcpy(buf, btf->data + off, len);
	return len;
}

static void purge_cand_cache(struct btf *btf);

static int btf_module_notify(struct notifier_block *nb, unsigned long op,
			     void *module)
{
	struct btf_module *btf_mod, *tmp;
	struct module *mod = module;
	struct btf *btf;
	int err = 0;

	if (mod->btf_data_size == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
	     op != MODULE_STATE_GOING))
		goto out;

	switch (op) {
	case MODULE_STATE_COMING:
		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
		if (!btf_mod) {
			err = -ENOMEM;
			goto out;
		}
		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
		if (IS_ERR(btf)) {
			pr_warn("failed to validate module [%s] BTF: %ld\n",
				mod->name, PTR_ERR(btf));
			kfree(btf_mod);
			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
				err = PTR_ERR(btf);
			goto out;
		}
		err = btf_alloc_id(btf);
		if (err) {
			btf_free(btf);
			kfree(btf_mod);
			goto out;
		}

		purge_cand_cache(NULL);
		mutex_lock(&btf_module_mutex);
		btf_mod->module = module;
		btf_mod->btf = btf;
		list_add(&btf_mod->list, &btf_modules);
		mutex_unlock(&btf_module_mutex);

		if (IS_ENABLED(CONFIG_SYSFS)) {
			struct bin_attribute *attr;

			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
			if (!attr)
				goto out;

			sysfs_bin_attr_init(attr);
			attr->attr.name = btf->name;
			attr->attr.mode = 0444;
			attr->size = btf->data_size;
			attr->private = btf;
			attr->read = btf_module_read;

			err = sysfs_create_bin_file(btf_kobj, attr);
			if (err) {
				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
					mod->name, err);
				kfree(attr);
				err = 0;
				goto out;
			}

			btf_mod->sysfs_attr = attr;
		}

		break;
	case MODULE_STATE_LIVE:
		mutex_lock(&btf_module_mutex);
		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
			if (btf_mod->module != module)
				continue;

			btf_mod->flags |= BTF_MODULE_F_LIVE;
			break;
		}
		mutex_unlock(&btf_module_mutex);
		break;
	case MODULE_STATE_GOING:
		mutex_lock(&btf_module_mutex);
		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
			if (btf_mod->module != module)
				continue;

			list_del(&btf_mod->list);
			if (btf_mod->sysfs_attr)
				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
			purge_cand_cache(btf_mod->btf);
			btf_put(btf_mod->btf);
			kfree(btf_mod->sysfs_attr);
			kfree(btf_mod);
			break;
		}
		mutex_unlock(&btf_module_mutex);
		break;
	}
out:
	return notifier_from_errno(err);
}

static struct notifier_block btf_module_nb = {
	.notifier_call = btf_module_notify,
};

static int __init btf_module_init(void)
{
	register_module_notifier(&btf_module_nb);
	return 0;
}

fs_initcall(btf_module_init);
#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */

struct module *btf_try_get_module(const struct btf *btf)
{
	struct module *res = NULL;
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	struct btf_module *btf_mod, *tmp;

	mutex_lock(&btf_module_mutex);
	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
		if (btf_mod->btf != btf)
			continue;

		/* We must only consider a module whose __init routine has
		 * finished, hence we must check for the BTF_MODULE_F_LIVE
		 * flag, which is set from the notifier callback for
		 * MODULE_STATE_LIVE.
		 */
		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
			res = btf_mod->module;

		break;
	}
	mutex_unlock(&btf_module_mutex);
#endif

	return res;
}

/* Returns the struct btf corresponding to the struct module.
 * This function can return NULL or an ERR_PTR.
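 * A NULL module means vmlinux BTF is requested; the ERR_PTR case can then
 * come from bpf_get_btf_vmlinux(). On success the returned btf's refcount
 * has been bumped, so the caller is expected to release it with btf_put().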
 */
static struct btf *btf_get_module_btf(const struct module *module)
{
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	struct btf_module *btf_mod, *tmp;
#endif
	struct btf *btf = NULL;

	if (!module) {
		btf = bpf_get_btf_vmlinux();
		if (!IS_ERR_OR_NULL(btf))
			btf_get(btf);
		return btf;
	}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	mutex_lock(&btf_module_mutex);
	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
		if (btf_mod->module != module)
			continue;

		btf_get(btf_mod->btf);
		btf = btf_mod->btf;
		break;
	}
	mutex_unlock(&btf_module_mutex);
#endif

	return btf;
}

BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{
	struct btf *btf = NULL;
	int btf_obj_fd = 0;
	long ret;

	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	ret = bpf_find_btf_id(name, kind, &btf);
	if (ret > 0 && btf_is_module(btf)) {
		btf_obj_fd = __btf_new_fd(btf);
		if (btf_obj_fd < 0) {
			btf_put(btf);
			return btf_obj_fd;
		}
		return ret | (((u64)btf_obj_fd) << 32);
	}
	if (ret > 0)
		btf_put(btf);
	return ret;
}

const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
	.func		= bpf_btf_find_by_name_kind,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE

/* Kernel Function (kfunc) BTF ID set registration API */

static int __btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
				    enum btf_kfunc_type type,
				    struct btf_id_set *add_set, bool vmlinux_set)
{
	struct btf_kfunc_set_tab *tab;
	struct btf_id_set *set;
	u32 set_cnt;
	int ret;

	if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) {
		ret = -EINVAL;
		goto end;
	}

	if (!add_set->cnt)
		return 0;

	tab = btf->kfunc_set_tab;
	if (!tab) {
		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
		if (!tab)
			return -ENOMEM;
		btf->kfunc_set_tab = tab;
	}

	set = tab->sets[hook][type];
	/* Warn when register_btf_kfunc_id_set is called twice for the same
	 * hook with module sets.
	 */
	if (WARN_ON_ONCE(set && !vmlinux_set)) {
		ret = -EINVAL;
		goto end;
	}

	/* We don't need to allocate, concatenate, and sort module sets, because
	 * only one is allowed per hook. Hence, we can directly assign the
	 * pointer and return.
	 */
	if (!vmlinux_set) {
		tab->sets[hook][type] = add_set;
		return 0;
	}

	/* In case of vmlinux sets, there may be more than one set being
	 * registered per hook. To create a unified set, we allocate a new set
	 * and concatenate all individual sets being registered. While each set
	 * is individually sorted, they may become unsorted when concatenated,
	 * hence re-sorting the final set is required to make binary searching
	 * the set using the btf_id_set_contains function work.
	 */
	set_cnt = set ? set->cnt : 0;

	if (set_cnt > U32_MAX - add_set->cnt) {
		ret = -EOVERFLOW;
		goto end;
	}

	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
		ret = -E2BIG;
		goto end;
	}

	/* Grow set */
	set = krealloc(tab->sets[hook][type],
		       offsetof(struct btf_id_set, ids[set_cnt + add_set->cnt]),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!set) {
		ret = -ENOMEM;
		goto end;
	}

	/* For a newly allocated set, initialize set->cnt to 0 */
	if (!tab->sets[hook][type])
		set->cnt = 0;
	tab->sets[hook][type] = set;

	/* Concatenate the two sets */
	memcpy(set->ids + set->cnt, add_set->ids, add_set->cnt * sizeof(set->ids[0]));
	set->cnt += add_set->cnt;

	sort(set->ids, set->cnt, sizeof(set->ids[0]), btf_id_cmp_func, NULL);

	return 0;
end:
	btf_free_kfunc_set_tab(btf);
	return ret;
}

static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
				  const struct btf_kfunc_id_set *kset)
{
	bool vmlinux_set = !btf_is_module(btf);
	int type, ret = 0;

	for (type = 0; type < ARRAY_SIZE(kset->sets); type++) {
		if (!kset->sets[type])
			continue;

		ret = __btf_populate_kfunc_set(btf, hook, type, kset->sets[type], vmlinux_set);
		if (ret)
			break;
	}
	return ret;
}

static bool __btf_kfunc_id_set_contains(const struct btf *btf,
					enum btf_kfunc_hook hook,
					enum btf_kfunc_type type,
					u32 kfunc_btf_id)
{
	struct btf_id_set *set;

	if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX)
		return false;
	if (!btf->kfunc_set_tab)
		return false;
	set = btf->kfunc_set_tab->sets[hook][type];
	if (!set)
		return false;
	return btf_id_set_contains(set, kfunc_btf_id);
}

static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_XDP:
		return BTF_KFUNC_HOOK_XDP;
	case BPF_PROG_TYPE_SCHED_CLS:
		return BTF_KFUNC_HOOK_TC;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return BTF_KFUNC_HOOK_STRUCT_OPS;
	default:
		return BTF_KFUNC_HOOK_MAX;
	}
}

/* Caution:
 * Reference to the module (obtained using btf_try_get_module) corresponding to
 * the struct btf *MUST* be held when calling this function from verifier
 * context. This is usually true as we stash references in prog's kfunc_btf_tab;
 * keeping the reference for the duration of the call provides the necessary
 * protection for looking up a well-formed btf->kfunc_set_tab.
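 *
 * An illustrative call from the verifier (a sketch, not a verbatim quote of
 * verifier.c) would look like:
 *
 *	if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
 *				       BTF_KFUNC_TYPE_CHECK, func_id))
 *		return -EACCES;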
 */
bool btf_kfunc_id_set_contains(const struct btf *btf,
			       enum bpf_prog_type prog_type,
			       enum btf_kfunc_type type, u32 kfunc_btf_id)
{
	enum btf_kfunc_hook hook;

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	return __btf_kfunc_id_set_contains(btf, hook, type, kfunc_btf_id);
}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
			      const struct btf_kfunc_id_set *kset)
{
	enum btf_kfunc_hook hook;
	struct btf *btf;
	int ret;

	btf = btf_get_module_btf(kset->owner);
	if (!btf) {
		if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
			pr_err("missing module BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	ret = btf_populate_kfunc_set(btf, hook, kset);
	btf_put(btf);
	return ret;
}
EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);

#define MAX_TYPES_ARE_COMPAT_DEPTH 2

static
int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
				const struct btf *targ_btf, __u32 targ_id,
				int level)
{
	const struct btf_type *local_type, *targ_type;
	int depth = 32; /* max recursion depth */

	/* caller made sure that names match (ignoring flavor suffix) */
	local_type = btf_type_by_id(local_btf, local_id);
	targ_type = btf_type_by_id(targ_btf, targ_id);
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

recur:
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_type = btf_type_skip_modifiers(local_btf, local_id, &local_id);
	targ_type = btf_type_skip_modifiers(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
		return 1;
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
	case BTF_KIND_PTR:
		local_id = local_type->type;
		targ_id = targ_type->type;
		goto recur;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_type);
		struct btf_param *targ_p = btf_params(targ_type);
		__u16 local_vlen = btf_vlen(local_type);
		__u16 targ_vlen = btf_vlen(targ_type);
		int i, err;

		if (local_vlen != targ_vlen)
			return 0;

		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			if (level <= 0)
				return -EINVAL;

			btf_type_skip_modifiers(local_btf, local_p->type, &local_id);
			btf_type_skip_modifiers(targ_btf, targ_p->type, &targ_id);
			err = __bpf_core_types_are_compat(local_btf, local_id,
							  targ_btf, targ_id,
							  level - 1);
			if (err <= 0)
				return err;
		}

		/* tail recurse for return type check */
		btf_type_skip_modifiers(local_btf, local_type->type, &local_id);
		btf_type_skip_modifiers(targ_btf, targ_type->type, &targ_id);
		goto recur;
	}
	default:
		return 0;
	}
}

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signatures: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_are_compat(local_btf, local_id,
					   targ_btf, targ_id,
					   MAX_TYPES_ARE_COMPAT_DEPTH);
}

static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i;

	for (i = n - 5; i >= 0; i--) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
	}
	return n;
}

struct bpf_cand_cache {
	const char *name;
	u32 name_len;
	u16 kind;
	u16 cnt;
	struct {
		const struct btf *btf;
		u32 id;
	} cands[];
};

static void bpf_free_cands(struct bpf_cand_cache *cands)
{
	if (!cands->cnt)
		/* empty candidate array was allocated on stack */
		return;
	kfree(cands);
}

static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
{
	kfree(cands->name);
	kfree(cands);
}

#define VMLINUX_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];

#define MODULE_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];

static DEFINE_MUTEX(cand_cache_mutex);

static void __print_cand_cache(struct bpf_verifier_log *log,
			       struct bpf_cand_cache **cache,
			       int cache_size)
{
	struct bpf_cand_cache *cc;
	int i, j;

	for (i = 0; i < cache_size; i++) {
		cc = cache[i];
		if (!cc)
			continue;
		bpf_log(log, "[%d]%s(", i, cc->name);
		for (j = 0; j < cc->cnt; j++) {
			bpf_log(log, "%d", cc->cands[j].id);
			if (j < cc->cnt - 1)
				bpf_log(log, " ");
		}
		bpf_log(log, "), ");
	}
}

static void print_cand_cache(struct bpf_verifier_log *log)
{
	mutex_lock(&cand_cache_mutex);
	bpf_log(log, "vmlinux_cand_cache:");
	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	bpf_log(log, "\nmodule_cand_cache:");
	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	bpf_log(log, "\n");
	mutex_unlock(&cand_cache_mutex);
}

static u32 hash_cands(struct bpf_cand_cache *cands)
{
	return jhash(cands->name, cands->name_len, 0);
}

static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
					       struct bpf_cand_cache **cache,
					       int cache_size)
{
	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];

	if (cc && cc->name_len == cands->name_len &&
	    !strncmp(cc->name, cands->name, cands->name_len))
		return cc;
	return NULL;
}

static size_t sizeof_cands(int cnt)
{
	return offsetof(struct bpf_cand_cache, cands[cnt]);
}

static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
						  struct bpf_cand_cache **cache,
						  int cache_size)
{
	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;

	if (*cc) {
		bpf_free_cands_from_cache(*cc);
		*cc = NULL;
	}
	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
	if (!new_cands) {
		bpf_free_cands(cands);
		return ERR_PTR(-ENOMEM);
	}
	/* strdup the name, since it will stay in cache.
	 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
	 */
	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
	bpf_free_cands(cands);
	if (!new_cands->name) {
		kfree(new_cands);
		return ERR_PTR(-ENOMEM);
	}
	*cc = new_cands;
	return new_cands;
}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
			       int cache_size)
{
	struct bpf_cand_cache *cc;
	int i, j;

	for (i = 0; i < cache_size; i++) {
		cc = cache[i];
		if (!cc)
			continue;
		if (!btf) {
			/* when a new module is loaded, purge all of
			 * module_cand_cache, since the new module might have
			 * candidates with names that match cached cands.
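			 * Otherwise a stale cached lookup could hide the new
			 * module's candidates from CO-RE relocation.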
			 */
			bpf_free_cands_from_cache(cc);
			cache[i] = NULL;
			continue;
		}
		/* when a module is unloaded purge cache entries
		 * that match the module's btf
		 */
		for (j = 0; j < cc->cnt; j++)
			if (cc->cands[j].btf == btf) {
				bpf_free_cands_from_cache(cc);
				cache[i] = NULL;
				break;
			}
	}
}

static void purge_cand_cache(struct btf *btf)
{
	mutex_lock(&cand_cache_mutex);
	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	mutex_unlock(&cand_cache_mutex);
}
#endif

static struct bpf_cand_cache *
bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
		   int targ_start_id)
{
	struct bpf_cand_cache *new_cands;
	const struct btf_type *t;
	const char *targ_name;
	size_t targ_essent_len;
	int n, i;

	n = btf_nr_types(targ_btf);
	for (i = targ_start_id; i < n; i++) {
		t = btf_type_by_id(targ_btf, i);
		if (btf_kind(t) != cands->kind)
			continue;

		targ_name = btf_name_by_offset(targ_btf, t->name_off);
		if (!targ_name)
			continue;

		/* the resched point is before strncmp to make sure that a
		 * search for a non-existing name will have a chance to schedule().
		 */
		cond_resched();

		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != cands->name_len)
			continue;

		/* most of the time there is only one candidate for a given kind+name pair */
		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
		if (!new_cands) {
			bpf_free_cands(cands);
			return ERR_PTR(-ENOMEM);
		}

		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
		bpf_free_cands(cands);
		cands = new_cands;
		cands->cands[cands->cnt].btf = targ_btf;
		cands->cands[cands->cnt].id = i;
		cands->cnt++;
	}
	return cands;
}

static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
{
	struct bpf_cand_cache *cands, *cc, local_cand = {};
	const struct btf *local_btf = ctx->btf;
	const struct btf_type *local_type;
	const struct btf *main_btf;
	size_t local_essent_len;
	struct btf *mod_btf;
	const char *name;
	int id;

	main_btf = bpf_get_btf_vmlinux();
	if (IS_ERR(main_btf))
		return ERR_CAST(main_btf);
	if (!main_btf)
		return ERR_PTR(-EINVAL);

	local_type = btf_type_by_id(local_btf, local_type_id);
	if (!local_type)
		return ERR_PTR(-EINVAL);

	name = btf_name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(name);

	cands = &local_cand;
	cands->name = name;
	cands->kind = btf_kind(local_type);
	cands->name_len = local_essent_len;

	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	/* cands is a pointer to stack here */
	if (cc) {
		if (cc->cnt)
			return cc;
		goto check_modules;
	}

	/* Attempt to find target candidates in vmlinux BTF first */
	cands = bpf_core_add_cands(cands, main_btf, 1);
	if (IS_ERR(cands))
		return ERR_CAST(cands);

	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */

	/* populate cache even when cands->cnt == 0 */
	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	if (IS_ERR(cc))
		return ERR_CAST(cc);

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cc->cnt)
		return cc;

check_modules:
	/* cands is a pointer to stack here and cands->cnt == 0 */
	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	if (cc)
		/* if cache has it return it even if cc->cnt == 0 */
		return cc;

	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, mod_btf, id) {
		if (!btf_is_module(mod_btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(mod_btf);
		spin_unlock_bh(&btf_idr_lock);
		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
		if (IS_ERR(cands)) {
			btf_put(mod_btf);
			return ERR_CAST(cands);
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(mod_btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
	 * or a pointer to stack if cands->cnt == 0.
	 * Copy it into the cache even when cands->cnt == 0 and
	 * return the result.
	 */
	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
}

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn)
{
	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
	struct bpf_core_cand_list cands = {};
	struct bpf_core_relo_res targ_res;
	struct bpf_core_spec *specs;
	int err;

	/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
	 * into arrays of btf_ids of struct fields and array indices.
	 */
	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
	if (!specs)
		return -ENOMEM;

	if (need_cands) {
		struct bpf_cand_cache *cc;
		int i;

		mutex_lock(&cand_cache_mutex);
		cc = bpf_core_find_cands(ctx, relo->type_id);
		if (IS_ERR(cc)) {
			bpf_log(ctx->log, "target candidate search failed for %d\n",
				relo->type_id);
			err = PTR_ERR(cc);
			goto out;
		}
		if (cc->cnt) {
			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
			if (!cands.cands) {
				err = -ENOMEM;
				goto out;
			}
		}
		for (i = 0; i < cc->cnt; i++) {
			bpf_log(ctx->log,
				"CO-RE relocating %s %s: found target candidate [%d]\n",
				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
			cands.cands[i].btf = cc->cands[i].btf;
			cands.cands[i].id = cc->cands[i].id;
		}
		cands.len = cc->cnt;
		/* cand_cache_mutex needs to span the cache lookup and
		 * the copy of the btf pointer into bpf_core_cand_list,
		 * since the module can be unloaded while bpf_core_calc_relo_insn
		 * is working with the module's btf.
		 */
	}

	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
				      &targ_res);
	if (err)
		goto out;

	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
				  &targ_res);

out:
	kfree(specs);
	if (need_cands) {
		kfree(cands.cands);
		mutex_unlock(&cand_cache_mutex);
		if (ctx->log->level & BPF_LOG_LEVEL2)
			print_cand_cache(ctx->log);
	}
	return err;
}
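
/* Usage sketch for the kfunc registration API above (illustrative only, not
 * part of the original file; the kfunc and set names are hypothetical):
 *
 *	BTF_SET_START(example_check_kfunc_ids)
 *	BTF_ID(func, bpf_example_kfunc)
 *	BTF_SET_END(example_check_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set example_kfunc_set = {
 *		.owner		= THIS_MODULE,
 *		.check_set	= &example_check_kfunc_ids,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
 *						 &example_kfunc_set);
 *	}
 *
 * For vmlinux sets, .owner is NULL and the call is made from an initcall.
 * bpf_prog_type_to_kfunc_hook() above shows which program types currently
 * have a kfunc hook (XDP, SCHED_CLS, STRUCT_OPS).
 */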