// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <net/sock.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format that describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily uses.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g., to describe an
 * array, 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1, the second
 * one has type_id 2, etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * a type_id (i.e. the "type" in "struct btf_type").
 *
 * NOTE that we cannot assume any reference order.
 * A btf_type object can refer to an earlier btf_type object,
 * but it can also refer to a later one.
 *
 * For example, to describe "const void *", a btf_type object
 * describing "const" may refer to another btf_type object
 * describing "void *". This type reference is done by
 * specifying the type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'. Some btf_types may not
 * have a name.
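 *
 * For example (an illustrative layout, not taken from any particular
 * BTF blob), a string section holding the names "int" and "x" would
 * look like:
 *
 *	offset:  0    1   2   3   4    5   6
 *	bytes:  '\0' 'i' 'n' 't' '\0' 'x' '\0'
 *
 * name_off 1 then refers to "int", name_off 5 refers to "x", and
 * name_off 0 denotes an anonymous type.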
 */

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verification
 * cannot be done. E.g., an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type reference in the first pass.
 *
 * The first pass still does some verification (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referred-to type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs, each with 16 members, and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
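 *
 * As a rough, illustrative sanity check of the 16MB figure: a
 * 'struct btf_type' is 12 bytes and each 'struct btf_member' is also
 * 12 bytes, so 64k structs of 16 members each need about
 * 64k * (12 + 16 * 12) bytes, i.e. ~12.75MB, leaving a few MB of
 * headroom for the string section within the 16MB cap.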
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_TRACING,
	BTF_KFUNC_HOOK_SYSCALL,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 256,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
};

struct btf_kfunc_set_tab {
	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
};

struct btf_id_dtor_kfunc_tab {
	u32 cnt;
	struct btf_id_dtor_kfunc dtors[];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown.
 */
#define BTF_SHOW_OBJ_SAFE_SIZE		32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE	16

/* Type name size */
#define BTF_SHOW_NAME_SIZE		80

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback. See btf_type_show() below.
 *
 * One challenge with showing nested data is that we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display. The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it. When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth. See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is that we want to ensure the data for display is safe to
 * access. To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it. We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or, if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself). btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts. obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines if a new
 * copy_from_kernel_nofault() is needed.
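 *
 * For example (illustrative numbers), dumping a 100-byte object at
 * kernel address A with the 32-byte safe buffer proceeds roughly as:
 *
 *	obj.head = A, obj.size = 100
 *	1st copy: obj.data = A,      safe[] caches bytes A..A+31
 *	2nd copy: obj.data = A + 32, safe[] caches bytes A+32..A+63
 *	...and so on as traversal advances past each copied chunk.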
 */
struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;	/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of these are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * a type (t) that refers to another
	 * type through t->type AND whose size cannot
	 * be determined without following t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
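	 *
	 * E.g. for "const volatile int x", the (illustrative) BTF chain
	 * is CONST -> VOLATILE -> INT; the size of the CONST type is
	 * only known after following t->type all the way down to the INT.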
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}

static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * a positive btf_id or a negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		spin_lock_bh(&btf_idr_lock);
		btf_put(btf);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

/* Types that act only as a source, not as a sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its members refer to
 * other types (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of struct where the same member type is
 * repeated array->nelems times.
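 *
 * E.g. (illustrative) "int a[2][3]" resolves much like a struct of
 * six consecutive int members would: once the element type's size is
 * known, the array's total size follows as
 * nelems * elem_size = 6 * 4 = 24 bytes.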
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM64:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
{
	return (const struct btf_enum64 *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	if (!BTF_STR_OFFSET_VALID(offset))
		return false;

	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	return offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    ((c == '.' && !dot_ok) ||
	      c != '.'))
		return false;
	return true;
}

static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true, dot_ok))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false, dot_ok))
			return false;
		src++;
	}

	return !*src;
}

/* Only C-style identifiers are permitted. This can be relaxed if
 * necessary.
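 *
 * E.g. "task_struct" and "_x1" are accepted, while "1st" and
 * "foo-bar" are rejected. Section names may additionally contain
 * dots (dot_ok), e.g. ".data..percpu".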
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, false);
}

static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, true);
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	const char *name;

	if (!offset)
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_str_by_offset(btf, offset);
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	type_id -= btf->start_id;
	if (type_id >= btf->nr_types)
		return NULL;
	return btf->types[type_id];
}
EXPORT_SYMBOL_GPL(btf_type_by_id);

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}

/*
 * Check that a given struct member is a regular int with the expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t) &&
	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
		t = btf_type_by_id(btf, t->type);
	}

	return t;
}

#define BTF_SHOW_MAX_ITER	10

#define BTF_KIND_BIT(kind)	(1ULL << kind)

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
	const struct btf_member *m = show->state.member;
	const struct btf_type *t;
	const struct btf_array *array;
	u32 id = show->state.type_id;
	const char *member = NULL;
	bool show_member = false;
	u64 kinds = 0;
	int i;

	show->state.name[0] = '\0';

	/*
	 * Don't show the type name if we're showing an array member;
	 * in that case we show the array type, so we don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";

	/* Retrieve member name, if any. */
	if (m) {
		member = btf_name_by_offset(show->btf, m->name_off);
		show_member = strlen(member) > 0;
		id = m->type;
	}

	/*
	 * Start with type_id, as we have resolved the struct btf_type *
	 * via btf_modifier_show() past the parent typedef to the child
	 * struct, int etc it is defined as. In such cases, the type_id
	 * still represents the starting type while the struct btf_type *
	 * in our show->state points at the resolved type of the typedef.
	 */
	t = btf_type_by_id(show->btf, id);
	if (!t)
		return "";

	/*
	 * The goal here is to build up the right number of pointer and
	 * array suffixes while ensuring the type name for a typedef
	 * is represented. Along the way we accumulate a list of
	 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for struct, we will just display the pointer value.
	 *
	 * We also want to accumulate the right number of pointer or array
	 * indices in the format string while iterating until we get to
	 * the typedef/pointee/array member target type.
	 *
	 * We start by pointing at the end of pointer and array suffix
	 * strings; as we accumulate pointers and arrays we move the pointer
	 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER levels of nesting of
	 * pointers and/or arrays and typedefs are supported as a precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to so that we can add parentheses if it is a
	 * "typedef struct" etc.
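	 *
	 * E.g. (illustrative) a member "m" whose type is
	 * "typedef struct foo_s foo_t" ends up shown as ".m = (foo_t){":
	 * the typedef name is kept, the "struct" prefix is dropped and
	 * the struct's opening brace follows the closing parenthesis.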
	 */
	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {

		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_TYPEDEF:
			if (!name)
				name = btf_name_by_offset(show->btf,
							  t->name_off);
			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
			parens = "[";
			if (!t)
				return "";
			array = btf_type_array(t);
			if (array_suffix > array_suffixes)
				array_suffix -= 2;
			id = array->type;
			break;
		case BTF_KIND_PTR:
			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
			if (ptr_suffix > ptr_suffixes)
				ptr_suffix -= 1;
			id = t->type;
			break;
		default:
			id = 0;
			break;
		}
		if (!id)
			break;
		t = btf_type_skip_qualifiers(show->btf, id);
	}
	/* We may not be able to represent this type; bail to be safe */
	if (i == BTF_SHOW_MAX_ITER)
		return "";

	if (!name)
		name = btf_name_by_offset(show->btf, t->name_off);

	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
			 "struct" : "union";
		/* if it's an array of struct/union, parens is already set */
		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
			parens = "{";
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		prefix = "enum";
		break;
	default:
		break;
	}

	/* pointer does not require parens */
	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";

	/* Even if we don't want type name info, we want parentheses etc */
	if (show->flags & BTF_SHOW_NONAME)
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);

	return show->state.name;
}

static const char *__btf_show_indent(struct btf_show *show)
{
	const char *indents = "                                "; /* 32 spaces */
	const char *indent = &indents[strlen(indents)];

	if ((indent - show->state.depth) >= indents)
		return indent - show->state.depth;
	return indents;
}

static const char *btf_show_indent(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
}

static const char *btf_show_newline(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
}

static const char *btf_show_delim(struct btf_show *show)
{
	if (show->state.depth == 0)
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
	    BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
}

__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{
	va_list args;

	if (!show->state.depth_check) {
		va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)				       \
	do {								       \
		if ((value) != (__typeof__(value))0 ||			       \
		    (show->flags & BTF_SHOW_ZERO) ||			       \
		    show->state.depth == 0) {				       \
			btf_show(show, "%s%s" fmt "%s%s",		       \
				 btf_show_indent(show),			       \
				 btf_show_name(show),			       \
				 value, btf_show_delim(show),		       \
				 btf_show_newline(show));		       \
			if (show->state.depth > show->state.depth_to_show)     \
				show->state.depth_to_show = show->state.depth; \
		}							       \
	} while (0)

#define btf_show_type_values(show, fmt, ...)				       \
	do {								       \
		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
			 btf_show_name(show),				       \
			 __VA_ARGS__, btf_show_delim(show),		       \
			 btf_show_newline(show));			       \
		if (show->state.depth > show->state.depth_to_show)	       \
			show->state.depth_to_show = show->state.depth;	       \
	} while (0)

/* How much is left to copy to safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{
	return show->obj.head + show->obj.size - data;
}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{
	return data >= show->obj.data &&
	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{
	if (btf_show_obj_is_safe(show, data, size))
		return show->obj.safe + (data - show->obj.data);
	return NULL;
}

/*
 * Return a safe-to-access version of data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data (determined by the @data pointer and the size of the
 * largest base type we can encounter (represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{
	const struct btf_type *rt;
	int size_left, size;
	void *safe = NULL;

	if (show->flags & BTF_SHOW_UNSAFE)
		return data;

	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
		return NULL;
	}

	/*
	 * Is this the toplevel object? If so, set the total object size and
	 * initialize pointers. Otherwise check if we still fall within
	 * our safe object data.
	 */
	if (show->state.depth == 0) {
		show->obj.size = size;
		show->obj.head = data;
	} else {
		/*
		 * If the size of the current object is > our remaining
		 * safe buffer we _may_ need to do a new copy. However
		 * consider the case of a nested struct; its size pushes
		 * us over the safe buffer limit, but showing any individual
		 * struct members does not. In such cases, we don't need
		 * to initiate a fresh copy yet; however we definitely need
		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
		 * in our buffer, regardless of the current object size.
		 * The logic here is that as we resolve types we will
		 * hit a base type at some point, and we need to be sure
		 * the next chunk of data is safely available to display
		 * that type info safely. We cannot rely on the size of
		 * the current object here because it may be much larger
		 * than our current buffer (e.g. task_struct is 8k).
		 * All we want to do here is ensure that we can print the
		 * next basic type, which we can if either
		 * - the current type size is within the safe buffer; or
		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *   the safe buffer.
		 */
		safe = __btf_show_obj_safe(show, data,
					   min(size,
					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
	}

	/*
	 * We need a new copy to our safe object, either because we haven't
	 * yet copied and are initializing safe data, or because the data
	 * we want falls outside the boundaries of the safe object.
	 */
	if (!safe) {
		size_left = btf_show_obj_size_left(show, data);
		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
		if (!show->state.status) {
			show->obj.data = data;
			safe = show->obj.safe;
		}
	}

	return safe;
}

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
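 *
 * E.g. (illustrative numbers) with the 32-byte safe buffer, starting
 * a 16-byte member that begins 24 bytes into the current copy leaves
 * only 8 safe bytes, less than BTF_SHOW_OBJ_BASE_TYPE_SIZE, so
 * btf_show_obj_safe() will trigger a fresh copy_from_kernel_nofault()
 * starting at that member's address.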
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{
	show->state.type = t;
	show->state.type_id = type_id;
	show->state.name[0] = '\0';

	return btf_show_obj_safe(show, t, data);
}

static void btf_show_end_type(struct btf_show *show)
{
	show->state.type = NULL;
	show->state.type_id = 0;
	show->state.name[0] = '\0';
}

static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{
	void *safe_data = btf_show_start_type(show, t, type_id, data);

	if (!safe_data)
		return safe_data;

	btf_show(show, "%s%s%s", btf_show_indent(show),
		 btf_show_name(show),
		 btf_show_newline(show));
	show->state.depth++;
	return safe_data;
}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{
	show->state.depth--;
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
		 btf_show_delim(show), btf_show_newline(show));
	btf_show_end_type(show);
}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{
	show->state.member = m;
}

static void btf_show_start_array_member(struct btf_show *show)
{
	show->state.array_member = 1;
	btf_show_start_member(show, NULL);
}

static void btf_show_end_member(struct btf_show *show)
{
	show->state.member = NULL;
}

static void btf_show_end_array_member(struct btf_show *show)
{
	show->state.array_member = 0;
	btf_show_end_member(show);
}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{
	show->state.array_encoding = array_encoding;
	show->state.array_terminated = 0;
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_array_type(struct btf_show *show)
{
	show->state.array_encoding = 0;
	show->state.array_terminated = 0;
	btf_show_end_aggr_type(show, "]");
}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_struct_type(struct btf_show *show)
{
	btf_show_end_aggr_type(show, "}");
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_type_str(t),
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If a member is logged again, it must have hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size == btf->nr_types) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0) {
			if (!btf->base_btf) {
				/* lazily init VOID type */
				new_types[0] = &btf_void;
				btf->nr_types++;
			}
		} else {
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * btf->nr_types);
		}

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[btf->nr_types++] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some of the map_delete_elem()
	 * implementations may run with IRQs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free_kfunc_set_tab(struct btf *btf)
{
	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
	int hook;

	if (!tab)
		return;
	/* For module BTF, we directly assign the sets being registered, so
	 * there is nothing to free except kfunc_set_tab.
	 */
	if (btf_is_module(btf))
		goto free_tab;
	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
		kfree(tab->sets[hook]);
free_tab:
	kfree(tab);
	btf->kfunc_set_tab = NULL;
}

static void btf_free_dtor_kfunc_tab(struct btf *btf)
{
	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;

	if (!tab)
		return;
	kfree(tab);
	btf->dtor_kfunc_tab = NULL;
}

static void btf_free(struct btf *btf)
{
	btf_free_dtor_kfunc_tab(btf);
	btf_free_kfunc_set_tab(btf);
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_get(struct btf *btf)
{
	refcount_inc(&btf->refcnt);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	/* base BTF types should be
	 * resolved by now */
	if (type_id < env->btf->start_id)
		return true;

	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	const struct btf *btf = env->btf;
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (type_id < btf->start_id ||
	    env->visit_states[type_id - btf->start_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id - btf->start_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	type_id -= btf->start_id; /* adjust to local type id */
	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *elem_id: id of u32
 * *total_nelems: (x * y). Hence, individual elem size is
 *                (*type_size / *total_nelems)
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * type: is not an array (e.g.
 *	 const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *elem_id: 0
 * *total_nelems: 1
 * *type_id: id of type if it's changed within the function, 0 if not
 */
static const struct btf_type *
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		   u32 *type_size, const struct btf_type **elem_type,
		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array = NULL;
	u32 i, size, nelems = 1, id = 0;

	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		/* type->size can be used */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM64:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		/* Modifiers */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_TYPE_TAG:
			id = type->type;
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		/* type without size */
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	if (total_nelems)
		*total_nelems = nelems;
	if (elem_type)
		*elem_type = type;
	if (elem_id)
		*elem_id = array ? array->type : 0;
	if (type_id && id)
		*type_id = id;

	return array_type ?
		       : type;
}

const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size)
{
	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
}

static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_ids[type_id - btf->start_id];
}

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf_resolved_type_id(btf, *type_id);
	return btf_type_by_id(btf, *type_id);
}

static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	return btf->resolved_sizes[type_id - btf->start_id];
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf_resolved_type_size(btf, size_type_id);
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		size_type_id = btf_resolved_type_id(btf, size_type_id);
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf_resolved_type_size(btf, size_type_id);
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}

/* Used for ptr, array, struct/union and float type members.
 * int, enum and modifier types have their specific callback functions.
 */
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{
	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	/* bitfield size is 0, so member->offset represents the bit offset only.
	 * It is safe to call the non-kflag check_member variants.
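	 *
	 * (Recall the kflag encoding: bits 0-23 of member->offset carry
	 * the bit offset and bits 24-31 the bitfield size, so a bitfield
	 * size of 0 leaves member->offset holding a plain bit offset.)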
2007 */ 2008 return btf_type_ops(member_type)->check_member(env, struct_type, 2009 member, 2010 member_type); 2011 } 2012 2013 static int btf_df_resolve(struct btf_verifier_env *env, 2014 const struct resolve_vertex *v) 2015 { 2016 btf_verifier_log_basic(env, v->t, "Unsupported resolve"); 2017 return -EINVAL; 2018 } 2019 2020 static void btf_df_show(const struct btf *btf, const struct btf_type *t, 2021 u32 type_id, void *data, u8 bits_offsets, 2022 struct btf_show *show) 2023 { 2024 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info)); 2025 } 2026 2027 static int btf_int_check_member(struct btf_verifier_env *env, 2028 const struct btf_type *struct_type, 2029 const struct btf_member *member, 2030 const struct btf_type *member_type) 2031 { 2032 u32 int_data = btf_type_int(member_type); 2033 u32 struct_bits_off = member->offset; 2034 u32 struct_size = struct_type->size; 2035 u32 nr_copy_bits; 2036 u32 bytes_offset; 2037 2038 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { 2039 btf_verifier_log_member(env, struct_type, member, 2040 "bits_offset exceeds U32_MAX"); 2041 return -EINVAL; 2042 } 2043 2044 struct_bits_off += BTF_INT_OFFSET(int_data); 2045 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2046 nr_copy_bits = BTF_INT_BITS(int_data) + 2047 BITS_PER_BYTE_MASKED(struct_bits_off); 2048 2049 if (nr_copy_bits > BITS_PER_U128) { 2050 btf_verifier_log_member(env, struct_type, member, 2051 "nr_copy_bits exceeds 128"); 2052 return -EINVAL; 2053 } 2054 2055 if (struct_size < bytes_offset || 2056 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2057 btf_verifier_log_member(env, struct_type, member, 2058 "Member exceeds struct_size"); 2059 return -EINVAL; 2060 } 2061 2062 return 0; 2063 } 2064 2065 static int btf_int_check_kflag_member(struct btf_verifier_env *env, 2066 const struct btf_type *struct_type, 2067 const struct btf_member *member, 2068 const struct btf_type *member_type) 2069 { 2070 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; 2071 u32 int_data = btf_type_int(member_type); 2072 u32 struct_size = struct_type->size; 2073 u32 nr_copy_bits; 2074 2075 /* a regular int type is required for the kflag int member */ 2076 if (!btf_type_int_is_regular(member_type)) { 2077 btf_verifier_log_member(env, struct_type, member, 2078 "Invalid member base type"); 2079 return -EINVAL; 2080 } 2081 2082 /* check sanity of bitfield size */ 2083 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 2084 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 2085 nr_int_data_bits = BTF_INT_BITS(int_data); 2086 if (!nr_bits) { 2087 /* Not a bitfield member, member offset must be at byte 2088 * boundary. 
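	 *
	 * E.g. (illustrative) a non-bitfield int member at bit
	 * offset 48 lies at byte offset 6 and passes this check,
	 * while bit offset 50 fails BITS_PER_BYTE_MASKED() and is
	 * rejected below as an invalid member offset.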
2089 */ 2090 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2091 btf_verifier_log_member(env, struct_type, member, 2092 "Invalid member offset"); 2093 return -EINVAL; 2094 } 2095 2096 nr_bits = nr_int_data_bits; 2097 } else if (nr_bits > nr_int_data_bits) { 2098 btf_verifier_log_member(env, struct_type, member, 2099 "Invalid member bitfield_size"); 2100 return -EINVAL; 2101 } 2102 2103 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2104 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); 2105 if (nr_copy_bits > BITS_PER_U128) { 2106 btf_verifier_log_member(env, struct_type, member, 2107 "nr_copy_bits exceeds 128"); 2108 return -EINVAL; 2109 } 2110 2111 if (struct_size < bytes_offset || 2112 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { 2113 btf_verifier_log_member(env, struct_type, member, 2114 "Member exceeds struct_size"); 2115 return -EINVAL; 2116 } 2117 2118 return 0; 2119 } 2120 2121 static s32 btf_int_check_meta(struct btf_verifier_env *env, 2122 const struct btf_type *t, 2123 u32 meta_left) 2124 { 2125 u32 int_data, nr_bits, meta_needed = sizeof(int_data); 2126 u16 encoding; 2127 2128 if (meta_left < meta_needed) { 2129 btf_verifier_log_basic(env, t, 2130 "meta_left:%u meta_needed:%u", 2131 meta_left, meta_needed); 2132 return -EINVAL; 2133 } 2134 2135 if (btf_type_vlen(t)) { 2136 btf_verifier_log_type(env, t, "vlen != 0"); 2137 return -EINVAL; 2138 } 2139 2140 if (btf_type_kflag(t)) { 2141 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2142 return -EINVAL; 2143 } 2144 2145 int_data = btf_type_int(t); 2146 if (int_data & ~BTF_INT_MASK) { 2147 btf_verifier_log_basic(env, t, "Invalid int_data:%x", 2148 int_data); 2149 return -EINVAL; 2150 } 2151 2152 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); 2153 2154 if (nr_bits > BITS_PER_U128) { 2155 btf_verifier_log_type(env, t, "nr_bits exceeds %zu", 2156 BITS_PER_U128); 2157 return -EINVAL; 2158 } 2159 2160 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { 2161 btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); 2162 return -EINVAL; 2163 } 2164 2165 /* 2166 * Only one of the encoding bits is allowed and it 2167 * should be sufficient for the pretty print purpose (i.e. decoding). 2168 * Multiple bits can be allowed later if it is found 2169 * to be insufficient. 2170 */ 2171 encoding = BTF_INT_ENCODING(int_data); 2172 if (encoding && 2173 encoding != BTF_INT_SIGNED && 2174 encoding != BTF_INT_CHAR && 2175 encoding != BTF_INT_BOOL) { 2176 btf_verifier_log_type(env, t, "Unsupported encoding"); 2177 return -ENOTSUPP; 2178 } 2179 2180 btf_verifier_log_type(env, t, NULL); 2181 2182 return meta_needed; 2183 } 2184 2185 static void btf_int_log(struct btf_verifier_env *env, 2186 const struct btf_type *t) 2187 { 2188 int int_data = btf_type_int(t); 2189 2190 btf_verifier_log(env, 2191 "size=%u bits_offset=%u nr_bits=%u encoding=%s", 2192 t->size, BTF_INT_OFFSET(int_data), 2193 BTF_INT_BITS(int_data), 2194 btf_int_encoding_str(BTF_INT_ENCODING(int_data))); 2195 } 2196 2197 static void btf_int128_print(struct btf_show *show, void *data) 2198 { 2199 /* data points to a __int128 number. 
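	 * For example (illustrative), the value ((__int128)1 << 64) | 2
	 * is printed as "0x10000000000000002": the upper 64 bits via
	 * "0x%llx" and the lower 64 bits zero-padded by "%016llx".
	 *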
	 * Suppose
	 *	int128_num = *(__int128 *)data;
	 * The formulas below show what upper_num and lower_num represent:
	 *	upper_num = int128_num >> 64;
	 *	lower_num = int128_num & 0xffffffffFFFFFFFFULL;
	 */
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = *(u64 *)data;
	lower_num = *(u64 *)(data + 8);
#else
	upper_num = *(u64 *)(data + 8);
	lower_num = *(u64 *)data;
#endif
	if (upper_num == 0)
		btf_show_type_value(show, "0x%llx", lower_num);
	else
		btf_show_type_values(show, "0x%llx%016llx", upper_num,
				     lower_num);
}

static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
			     u16 right_shift_bits)
{
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = print_num[0];
	lower_num = print_num[1];
#else
	upper_num = print_num[1];
	lower_num = print_num[0];
#endif

	/* shake out unneeded bits by shift/or operations */
	if (left_shift_bits >= 64) {
		upper_num = lower_num << (left_shift_bits - 64);
		lower_num = 0;
	} else {
		upper_num = (upper_num << left_shift_bits) |
			    (lower_num >> (64 - left_shift_bits));
		lower_num = lower_num << left_shift_bits;
	}

	if (right_shift_bits >= 64) {
		lower_num = upper_num >> (right_shift_bits - 64);
		upper_num = 0;
	} else {
		lower_num = (lower_num >> right_shift_bits) |
			    (upper_num << (64 - right_shift_bits));
		upper_num = upper_num >> right_shift_bits;
	}

#ifdef __BIG_ENDIAN_BITFIELD
	print_num[0] = upper_num;
	print_num[1] = lower_num;
#else
	print_num[0] = lower_num;
	print_num[1] = upper_num;
#endif
}

static void btf_bitfield_show(void *data, u8 bits_offset,
			      u8 nr_bits, struct btf_show *show)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(show, print_num);
}

static void btf_int_bits_show(const struct btf *btf,
			      const struct btf_type *t,
			      void *data, u8 bits_offset,
			      struct btf_show *show)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 128 bits.
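	 *
	 * E.g. (illustrative) bits_offset 5 plus BTF_INT_OFFSET() 11
	 * gives total_bits_offset 16: data is advanced by two bytes
	 * and the remaining in-byte bits_offset becomes 0.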
2300 */ 2301 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 2302 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 2303 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 2304 btf_bitfield_show(data, bits_offset, nr_bits, show); 2305 } 2306 2307 static void btf_int_show(const struct btf *btf, const struct btf_type *t, 2308 u32 type_id, void *data, u8 bits_offset, 2309 struct btf_show *show) 2310 { 2311 u32 int_data = btf_type_int(t); 2312 u8 encoding = BTF_INT_ENCODING(int_data); 2313 bool sign = encoding & BTF_INT_SIGNED; 2314 u8 nr_bits = BTF_INT_BITS(int_data); 2315 void *safe_data; 2316 2317 safe_data = btf_show_start_type(show, t, type_id, data); 2318 if (!safe_data) 2319 return; 2320 2321 if (bits_offset || BTF_INT_OFFSET(int_data) || 2322 BITS_PER_BYTE_MASKED(nr_bits)) { 2323 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2324 goto out; 2325 } 2326 2327 switch (nr_bits) { 2328 case 128: 2329 btf_int128_print(show, safe_data); 2330 break; 2331 case 64: 2332 if (sign) 2333 btf_show_type_value(show, "%lld", *(s64 *)safe_data); 2334 else 2335 btf_show_type_value(show, "%llu", *(u64 *)safe_data); 2336 break; 2337 case 32: 2338 if (sign) 2339 btf_show_type_value(show, "%d", *(s32 *)safe_data); 2340 else 2341 btf_show_type_value(show, "%u", *(u32 *)safe_data); 2342 break; 2343 case 16: 2344 if (sign) 2345 btf_show_type_value(show, "%d", *(s16 *)safe_data); 2346 else 2347 btf_show_type_value(show, "%u", *(u16 *)safe_data); 2348 break; 2349 case 8: 2350 if (show->state.array_encoding == BTF_INT_CHAR) { 2351 /* check for null terminator */ 2352 if (show->state.array_terminated) 2353 break; 2354 if (*(char *)data == '\0') { 2355 show->state.array_terminated = 1; 2356 break; 2357 } 2358 if (isprint(*(char *)data)) { 2359 btf_show_type_value(show, "'%c'", 2360 *(char *)safe_data); 2361 break; 2362 } 2363 } 2364 if (sign) 2365 btf_show_type_value(show, "%d", *(s8 *)safe_data); 2366 else 2367 btf_show_type_value(show, "%u", *(u8 *)safe_data); 2368 break; 2369 default: 2370 btf_int_bits_show(btf, t, safe_data, bits_offset, show); 2371 break; 2372 } 2373 out: 2374 btf_show_end_type(show); 2375 } 2376 2377 static const struct btf_kind_operations int_ops = { 2378 .check_meta = btf_int_check_meta, 2379 .resolve = btf_df_resolve, 2380 .check_member = btf_int_check_member, 2381 .check_kflag_member = btf_int_check_kflag_member, 2382 .log_details = btf_int_log, 2383 .show = btf_int_show, 2384 }; 2385 2386 static int btf_modifier_check_member(struct btf_verifier_env *env, 2387 const struct btf_type *struct_type, 2388 const struct btf_member *member, 2389 const struct btf_type *member_type) 2390 { 2391 const struct btf_type *resolved_type; 2392 u32 resolved_type_id = member->type; 2393 struct btf_member resolved_member; 2394 struct btf *btf = env->btf; 2395 2396 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2397 if (!resolved_type) { 2398 btf_verifier_log_member(env, struct_type, member, 2399 "Invalid member"); 2400 return -EINVAL; 2401 } 2402 2403 resolved_member = *member; 2404 resolved_member.type = resolved_type_id; 2405 2406 return btf_type_ops(resolved_type)->check_member(env, struct_type, 2407 &resolved_member, 2408 resolved_type); 2409 } 2410 2411 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, 2412 const struct btf_type *struct_type, 2413 const struct btf_member *member, 2414 const struct btf_type *member_type) 2415 { 2416 const struct btf_type *resolved_type; 2417 u32 resolved_type_id = member->type; 2418 struct btf_member 
resolved_member; 2419 struct btf *btf = env->btf; 2420 2421 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); 2422 if (!resolved_type) { 2423 btf_verifier_log_member(env, struct_type, member, 2424 "Invalid member"); 2425 return -EINVAL; 2426 } 2427 2428 resolved_member = *member; 2429 resolved_member.type = resolved_type_id; 2430 2431 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type, 2432 &resolved_member, 2433 resolved_type); 2434 } 2435 2436 static int btf_ptr_check_member(struct btf_verifier_env *env, 2437 const struct btf_type *struct_type, 2438 const struct btf_member *member, 2439 const struct btf_type *member_type) 2440 { 2441 u32 struct_size, struct_bits_off, bytes_offset; 2442 2443 struct_size = struct_type->size; 2444 struct_bits_off = member->offset; 2445 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2446 2447 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2448 btf_verifier_log_member(env, struct_type, member, 2449 "Member is not byte aligned"); 2450 return -EINVAL; 2451 } 2452 2453 if (struct_size - bytes_offset < sizeof(void *)) { 2454 btf_verifier_log_member(env, struct_type, member, 2455 "Member exceeds struct_size"); 2456 return -EINVAL; 2457 } 2458 2459 return 0; 2460 } 2461 2462 static int btf_ref_type_check_meta(struct btf_verifier_env *env, 2463 const struct btf_type *t, 2464 u32 meta_left) 2465 { 2466 const char *value; 2467 2468 if (btf_type_vlen(t)) { 2469 btf_verifier_log_type(env, t, "vlen != 0"); 2470 return -EINVAL; 2471 } 2472 2473 if (btf_type_kflag(t)) { 2474 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2475 return -EINVAL; 2476 } 2477 2478 if (!BTF_TYPE_ID_VALID(t->type)) { 2479 btf_verifier_log_type(env, t, "Invalid type_id"); 2480 return -EINVAL; 2481 } 2482 2483 /* typedef/type_tag type must have a valid name, and other ref types, 2484 * volatile, const, restrict, should have a null name. 2485 */ 2486 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { 2487 if (!t->name_off || 2488 !btf_name_valid_identifier(env->btf, t->name_off)) { 2489 btf_verifier_log_type(env, t, "Invalid name"); 2490 return -EINVAL; 2491 } 2492 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { 2493 value = btf_name_by_offset(env->btf, t->name_off); 2494 if (!value || !value[0]) { 2495 btf_verifier_log_type(env, t, "Invalid name"); 2496 return -EINVAL; 2497 } 2498 } else { 2499 if (t->name_off) { 2500 btf_verifier_log_type(env, t, "Invalid name"); 2501 return -EINVAL; 2502 } 2503 } 2504 2505 btf_verifier_log_type(env, t, NULL); 2506 2507 return 0; 2508 } 2509 2510 static int btf_modifier_resolve(struct btf_verifier_env *env, 2511 const struct resolve_vertex *v) 2512 { 2513 const struct btf_type *t = v->t; 2514 const struct btf_type *next_type; 2515 u32 next_type_id = t->type; 2516 struct btf *btf = env->btf; 2517 2518 next_type = btf_type_by_id(btf, next_type_id); 2519 if (!next_type || btf_type_is_resolve_source_only(next_type)) { 2520 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2521 return -EINVAL; 2522 } 2523 2524 if (!env_type_is_resolve_sink(env, next_type) && 2525 !env_type_is_resolved(env, next_type_id)) 2526 return env_stack_push(env, next_type, next_type_id); 2527 2528 /* Figure out the resolved next_type_id with size. 2529 * They will be stored in the current modifier's 2530 * resolved_ids and resolved_sizes such that it can 2531 * save us a few type-following when we use it later (e.g. in 2532 * pretty print). 
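	 *
	 * For instance (illustrative), given
	 *
	 *	typedef const int cint;
	 *
	 * i.e. TYPEDEF -> CONST -> INT, the TYPEDEF's resolved_ids
	 * entry ends up pointing directly at the INT, so a later
	 * btf_type_id_size() does not have to walk the CONST link
	 * again.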
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* We must resolve to something concrete at this point: no
	 * forward types or similar that would resolve to a size of
	 * zero are allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not refer back to
	 * the current ptr (t).
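	 *
	 * E.g. (illustrative, debug-log style):
	 *
	 *	[1] PTR (anon) type_id=2
	 *	[2] CONST (anon) type_id=1
	 *
	 * If [2] was already RESOLVED (say, while checking a struct
	 * member) with [1] as its last-resolved-ptr, resolving [1]
	 * must continue from [1] itself so that env_stack_push()
	 * can report the backedge and reject the loop.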
2623 */ 2624 if (btf_type_is_modifier(next_type)) { 2625 const struct btf_type *resolved_type; 2626 u32 resolved_type_id; 2627 2628 resolved_type_id = next_type_id; 2629 resolved_type = btf_type_id_resolve(btf, &resolved_type_id); 2630 2631 if (btf_type_is_ptr(resolved_type) && 2632 !env_type_is_resolve_sink(env, resolved_type) && 2633 !env_type_is_resolved(env, resolved_type_id)) 2634 return env_stack_push(env, resolved_type, 2635 resolved_type_id); 2636 } 2637 2638 if (!btf_type_id_size(btf, &next_type_id, NULL)) { 2639 if (env_type_is_resolved(env, next_type_id)) 2640 next_type = btf_type_id_resolve(btf, &next_type_id); 2641 2642 if (!btf_type_is_void(next_type) && 2643 !btf_type_is_fwd(next_type) && 2644 !btf_type_is_func_proto(next_type)) { 2645 btf_verifier_log_type(env, v->t, "Invalid type_id"); 2646 return -EINVAL; 2647 } 2648 } 2649 2650 env_stack_pop_resolved(env, next_type_id, 0); 2651 2652 return 0; 2653 } 2654 2655 static void btf_modifier_show(const struct btf *btf, 2656 const struct btf_type *t, 2657 u32 type_id, void *data, 2658 u8 bits_offset, struct btf_show *show) 2659 { 2660 if (btf->resolved_ids) 2661 t = btf_type_id_resolve(btf, &type_id); 2662 else 2663 t = btf_type_skip_modifiers(btf, type_id, NULL); 2664 2665 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2666 } 2667 2668 static void btf_var_show(const struct btf *btf, const struct btf_type *t, 2669 u32 type_id, void *data, u8 bits_offset, 2670 struct btf_show *show) 2671 { 2672 t = btf_type_id_resolve(btf, &type_id); 2673 2674 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); 2675 } 2676 2677 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, 2678 u32 type_id, void *data, u8 bits_offset, 2679 struct btf_show *show) 2680 { 2681 void *safe_data; 2682 2683 safe_data = btf_show_start_type(show, t, type_id, data); 2684 if (!safe_data) 2685 return; 2686 2687 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ 2688 if (show->flags & BTF_SHOW_PTR_RAW) 2689 btf_show_type_value(show, "0x%px", *(void **)safe_data); 2690 else 2691 btf_show_type_value(show, "0x%p", *(void **)safe_data); 2692 btf_show_end_type(show); 2693 } 2694 2695 static void btf_ref_type_log(struct btf_verifier_env *env, 2696 const struct btf_type *t) 2697 { 2698 btf_verifier_log(env, "type_id=%u", t->type); 2699 } 2700 2701 static struct btf_kind_operations modifier_ops = { 2702 .check_meta = btf_ref_type_check_meta, 2703 .resolve = btf_modifier_resolve, 2704 .check_member = btf_modifier_check_member, 2705 .check_kflag_member = btf_modifier_check_kflag_member, 2706 .log_details = btf_ref_type_log, 2707 .show = btf_modifier_show, 2708 }; 2709 2710 static struct btf_kind_operations ptr_ops = { 2711 .check_meta = btf_ref_type_check_meta, 2712 .resolve = btf_ptr_resolve, 2713 .check_member = btf_ptr_check_member, 2714 .check_kflag_member = btf_generic_check_kflag_member, 2715 .log_details = btf_ref_type_log, 2716 .show = btf_ptr_show, 2717 }; 2718 2719 static s32 btf_fwd_check_meta(struct btf_verifier_env *env, 2720 const struct btf_type *t, 2721 u32 meta_left) 2722 { 2723 if (btf_type_vlen(t)) { 2724 btf_verifier_log_type(env, t, "vlen != 0"); 2725 return -EINVAL; 2726 } 2727 2728 if (t->type) { 2729 btf_verifier_log_type(env, t, "type != 0"); 2730 return -EINVAL; 2731 } 2732 2733 /* fwd type must have a valid name */ 2734 if (!t->name_off || 2735 !btf_name_valid_identifier(env->btf, t->name_off)) { 2736 btf_verifier_log_type(env, t, "Invalid name"); 2737 return -EINVAL; 2738 } 2739 2740 
btf_verifier_log_type(env, t, NULL); 2741 2742 return 0; 2743 } 2744 2745 static void btf_fwd_type_log(struct btf_verifier_env *env, 2746 const struct btf_type *t) 2747 { 2748 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); 2749 } 2750 2751 static struct btf_kind_operations fwd_ops = { 2752 .check_meta = btf_fwd_check_meta, 2753 .resolve = btf_df_resolve, 2754 .check_member = btf_df_check_member, 2755 .check_kflag_member = btf_df_check_kflag_member, 2756 .log_details = btf_fwd_type_log, 2757 .show = btf_df_show, 2758 }; 2759 2760 static int btf_array_check_member(struct btf_verifier_env *env, 2761 const struct btf_type *struct_type, 2762 const struct btf_member *member, 2763 const struct btf_type *member_type) 2764 { 2765 u32 struct_bits_off = member->offset; 2766 u32 struct_size, bytes_offset; 2767 u32 array_type_id, array_size; 2768 struct btf *btf = env->btf; 2769 2770 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 2771 btf_verifier_log_member(env, struct_type, member, 2772 "Member is not byte aligned"); 2773 return -EINVAL; 2774 } 2775 2776 array_type_id = member->type; 2777 btf_type_id_size(btf, &array_type_id, &array_size); 2778 struct_size = struct_type->size; 2779 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 2780 if (struct_size - bytes_offset < array_size) { 2781 btf_verifier_log_member(env, struct_type, member, 2782 "Member exceeds struct_size"); 2783 return -EINVAL; 2784 } 2785 2786 return 0; 2787 } 2788 2789 static s32 btf_array_check_meta(struct btf_verifier_env *env, 2790 const struct btf_type *t, 2791 u32 meta_left) 2792 { 2793 const struct btf_array *array = btf_type_array(t); 2794 u32 meta_needed = sizeof(*array); 2795 2796 if (meta_left < meta_needed) { 2797 btf_verifier_log_basic(env, t, 2798 "meta_left:%u meta_needed:%u", 2799 meta_left, meta_needed); 2800 return -EINVAL; 2801 } 2802 2803 /* array type should not have a name */ 2804 if (t->name_off) { 2805 btf_verifier_log_type(env, t, "Invalid name"); 2806 return -EINVAL; 2807 } 2808 2809 if (btf_type_vlen(t)) { 2810 btf_verifier_log_type(env, t, "vlen != 0"); 2811 return -EINVAL; 2812 } 2813 2814 if (btf_type_kflag(t)) { 2815 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 2816 return -EINVAL; 2817 } 2818 2819 if (t->size) { 2820 btf_verifier_log_type(env, t, "size != 0"); 2821 return -EINVAL; 2822 } 2823 2824 /* Array elem type and index type cannot be in type void, 2825 * so !array->type and !array->index_type are not allowed. 
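	 *
	 * E.g. (illustrative, debug-log style) "int a[10]" is:
	 *
	 *	[1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
	 *	[2] ARRAY (anon) type_id=1 index_type_id=1 nr_elems=10
	 *
	 * with both the elem type and the index type referring to
	 * the INT at [1].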
2826 */ 2827 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { 2828 btf_verifier_log_type(env, t, "Invalid elem"); 2829 return -EINVAL; 2830 } 2831 2832 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { 2833 btf_verifier_log_type(env, t, "Invalid index"); 2834 return -EINVAL; 2835 } 2836 2837 btf_verifier_log_type(env, t, NULL); 2838 2839 return meta_needed; 2840 } 2841 2842 static int btf_array_resolve(struct btf_verifier_env *env, 2843 const struct resolve_vertex *v) 2844 { 2845 const struct btf_array *array = btf_type_array(v->t); 2846 const struct btf_type *elem_type, *index_type; 2847 u32 elem_type_id, index_type_id; 2848 struct btf *btf = env->btf; 2849 u32 elem_size; 2850 2851 /* Check array->index_type */ 2852 index_type_id = array->index_type; 2853 index_type = btf_type_by_id(btf, index_type_id); 2854 if (btf_type_nosize_or_null(index_type) || 2855 btf_type_is_resolve_source_only(index_type)) { 2856 btf_verifier_log_type(env, v->t, "Invalid index"); 2857 return -EINVAL; 2858 } 2859 2860 if (!env_type_is_resolve_sink(env, index_type) && 2861 !env_type_is_resolved(env, index_type_id)) 2862 return env_stack_push(env, index_type, index_type_id); 2863 2864 index_type = btf_type_id_size(btf, &index_type_id, NULL); 2865 if (!index_type || !btf_type_is_int(index_type) || 2866 !btf_type_int_is_regular(index_type)) { 2867 btf_verifier_log_type(env, v->t, "Invalid index"); 2868 return -EINVAL; 2869 } 2870 2871 /* Check array->type */ 2872 elem_type_id = array->type; 2873 elem_type = btf_type_by_id(btf, elem_type_id); 2874 if (btf_type_nosize_or_null(elem_type) || 2875 btf_type_is_resolve_source_only(elem_type)) { 2876 btf_verifier_log_type(env, v->t, 2877 "Invalid elem"); 2878 return -EINVAL; 2879 } 2880 2881 if (!env_type_is_resolve_sink(env, elem_type) && 2882 !env_type_is_resolved(env, elem_type_id)) 2883 return env_stack_push(env, elem_type, elem_type_id); 2884 2885 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 2886 if (!elem_type) { 2887 btf_verifier_log_type(env, v->t, "Invalid elem"); 2888 return -EINVAL; 2889 } 2890 2891 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { 2892 btf_verifier_log_type(env, v->t, "Invalid array of int"); 2893 return -EINVAL; 2894 } 2895 2896 if (array->nelems && elem_size > U32_MAX / array->nelems) { 2897 btf_verifier_log_type(env, v->t, 2898 "Array size overflows U32_MAX"); 2899 return -EINVAL; 2900 } 2901 2902 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); 2903 2904 return 0; 2905 } 2906 2907 static void btf_array_log(struct btf_verifier_env *env, 2908 const struct btf_type *t) 2909 { 2910 const struct btf_array *array = btf_type_array(t); 2911 2912 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", 2913 array->type, array->index_type, array->nelems); 2914 } 2915 2916 static void __btf_array_show(const struct btf *btf, const struct btf_type *t, 2917 u32 type_id, void *data, u8 bits_offset, 2918 struct btf_show *show) 2919 { 2920 const struct btf_array *array = btf_type_array(t); 2921 const struct btf_kind_operations *elem_ops; 2922 const struct btf_type *elem_type; 2923 u32 i, elem_size = 0, elem_type_id; 2924 u16 encoding = 0; 2925 2926 elem_type_id = array->type; 2927 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL); 2928 if (elem_type && btf_type_has_size(elem_type)) 2929 elem_size = elem_type->size; 2930 2931 if (elem_type && btf_type_is_int(elem_type)) { 2932 u32 int_type = btf_type_int(elem_type); 2933 2934 encoding = 
BTF_INT_ENCODING(int_type);

		/*
		 * Compilers rarely set the BTF_INT_CHAR encoding on
		 * char arrays, so if the element size is 1 we assume
		 * a char array; printable elements are then shown as
		 * characters.
		 */
		if (elem_size == 1)
			encoding = BTF_INT_CHAR;
	}

	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
		return;

	if (!elem_type)
		goto out;
	elem_ops = btf_type_ops(elem_type);

	for (i = 0; i < array->nelems; i++) {
		btf_show_start_array_member(show);

		elem_ops->show(btf, elem_type, elem_type_id, data,
			       bits_offset, show);
		data += elem_size;

		btf_show_end_array_member(show);

		if (show->state.array_terminated)
			break;
	}
out:
	btf_show_end_array_type(show);
}

static void btf_array_show(const struct btf *btf, const struct btf_type *t,
			   u32 type_id, void *data, u8 bits_offset,
			   struct btf_show *show)
{
	const struct btf_member *m = show->state.member;

	/*
	 * First check if any members would be shown (are non-zero).
	 * See comments above "struct btf_show" definition for more
	 * details on how this works at a high-level.
	 */
	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
		if (!show->state.depth_check) {
			show->state.depth_check = show->state.depth + 1;
			show->state.depth_to_show = 0;
		}
		__btf_array_show(btf, t, type_id, data, bits_offset, show);
		show->state.member = m;

		if (show->state.depth_check != show->state.depth + 1)
			return;
		show->state.depth_check = 0;

		if (show->state.depth_to_show <= show->state.depth)
			return;
		/*
		 * Reaching here indicates we have recursed and found
		 * non-zero array member(s).
2997 */ 2998 } 2999 __btf_array_show(btf, t, type_id, data, bits_offset, show); 3000 } 3001 3002 static struct btf_kind_operations array_ops = { 3003 .check_meta = btf_array_check_meta, 3004 .resolve = btf_array_resolve, 3005 .check_member = btf_array_check_member, 3006 .check_kflag_member = btf_generic_check_kflag_member, 3007 .log_details = btf_array_log, 3008 .show = btf_array_show, 3009 }; 3010 3011 static int btf_struct_check_member(struct btf_verifier_env *env, 3012 const struct btf_type *struct_type, 3013 const struct btf_member *member, 3014 const struct btf_type *member_type) 3015 { 3016 u32 struct_bits_off = member->offset; 3017 u32 struct_size, bytes_offset; 3018 3019 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3020 btf_verifier_log_member(env, struct_type, member, 3021 "Member is not byte aligned"); 3022 return -EINVAL; 3023 } 3024 3025 struct_size = struct_type->size; 3026 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3027 if (struct_size - bytes_offset < member_type->size) { 3028 btf_verifier_log_member(env, struct_type, member, 3029 "Member exceeds struct_size"); 3030 return -EINVAL; 3031 } 3032 3033 return 0; 3034 } 3035 3036 static s32 btf_struct_check_meta(struct btf_verifier_env *env, 3037 const struct btf_type *t, 3038 u32 meta_left) 3039 { 3040 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 3041 const struct btf_member *member; 3042 u32 meta_needed, last_offset; 3043 struct btf *btf = env->btf; 3044 u32 struct_size = t->size; 3045 u32 offset; 3046 u16 i; 3047 3048 meta_needed = btf_type_vlen(t) * sizeof(*member); 3049 if (meta_left < meta_needed) { 3050 btf_verifier_log_basic(env, t, 3051 "meta_left:%u meta_needed:%u", 3052 meta_left, meta_needed); 3053 return -EINVAL; 3054 } 3055 3056 /* struct type either no name or a valid one */ 3057 if (t->name_off && 3058 !btf_name_valid_identifier(env->btf, t->name_off)) { 3059 btf_verifier_log_type(env, t, "Invalid name"); 3060 return -EINVAL; 3061 } 3062 3063 btf_verifier_log_type(env, t, NULL); 3064 3065 last_offset = 0; 3066 for_each_member(i, t, member) { 3067 if (!btf_name_offset_valid(btf, member->name_off)) { 3068 btf_verifier_log_member(env, t, member, 3069 "Invalid member name_offset:%u", 3070 member->name_off); 3071 return -EINVAL; 3072 } 3073 3074 /* struct member either no name or a valid one */ 3075 if (member->name_off && 3076 !btf_name_valid_identifier(btf, member->name_off)) { 3077 btf_verifier_log_member(env, t, member, "Invalid name"); 3078 return -EINVAL; 3079 } 3080 /* A member cannot be in type void */ 3081 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { 3082 btf_verifier_log_member(env, t, member, 3083 "Invalid type_id"); 3084 return -EINVAL; 3085 } 3086 3087 offset = __btf_member_bit_offset(t, member); 3088 if (is_union && offset) { 3089 btf_verifier_log_member(env, t, member, 3090 "Invalid member bits_offset"); 3091 return -EINVAL; 3092 } 3093 3094 /* 3095 * ">" instead of ">=" because the last member could be 3096 * "char a[0];" 3097 */ 3098 if (last_offset > offset) { 3099 btf_verifier_log_member(env, t, member, 3100 "Invalid member bits_offset"); 3101 return -EINVAL; 3102 } 3103 3104 if (BITS_ROUNDUP_BYTES(offset) > struct_size) { 3105 btf_verifier_log_member(env, t, member, 3106 "Member bits_offset exceeds its struct size"); 3107 return -EINVAL; 3108 } 3109 3110 btf_verifier_log_member(env, t, member, NULL); 3111 last_offset = offset; 3112 } 3113 3114 return meta_needed; 3115 } 3116 3117 static int btf_struct_resolve(struct btf_verifier_env *env, 3118 const struct 
resolve_vertex *v) 3119 { 3120 const struct btf_member *member; 3121 int err; 3122 u16 i; 3123 3124 /* Before continue resolving the next_member, 3125 * ensure the last member is indeed resolved to a 3126 * type with size info. 3127 */ 3128 if (v->next_member) { 3129 const struct btf_type *last_member_type; 3130 const struct btf_member *last_member; 3131 u32 last_member_type_id; 3132 3133 last_member = btf_type_member(v->t) + v->next_member - 1; 3134 last_member_type_id = last_member->type; 3135 if (WARN_ON_ONCE(!env_type_is_resolved(env, 3136 last_member_type_id))) 3137 return -EINVAL; 3138 3139 last_member_type = btf_type_by_id(env->btf, 3140 last_member_type_id); 3141 if (btf_type_kflag(v->t)) 3142 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t, 3143 last_member, 3144 last_member_type); 3145 else 3146 err = btf_type_ops(last_member_type)->check_member(env, v->t, 3147 last_member, 3148 last_member_type); 3149 if (err) 3150 return err; 3151 } 3152 3153 for_each_member_from(i, v->next_member, v->t, member) { 3154 u32 member_type_id = member->type; 3155 const struct btf_type *member_type = btf_type_by_id(env->btf, 3156 member_type_id); 3157 3158 if (btf_type_nosize_or_null(member_type) || 3159 btf_type_is_resolve_source_only(member_type)) { 3160 btf_verifier_log_member(env, v->t, member, 3161 "Invalid member"); 3162 return -EINVAL; 3163 } 3164 3165 if (!env_type_is_resolve_sink(env, member_type) && 3166 !env_type_is_resolved(env, member_type_id)) { 3167 env_stack_set_next_member(env, i + 1); 3168 return env_stack_push(env, member_type, member_type_id); 3169 } 3170 3171 if (btf_type_kflag(v->t)) 3172 err = btf_type_ops(member_type)->check_kflag_member(env, v->t, 3173 member, 3174 member_type); 3175 else 3176 err = btf_type_ops(member_type)->check_member(env, v->t, 3177 member, 3178 member_type); 3179 if (err) 3180 return err; 3181 } 3182 3183 env_stack_pop_resolved(env, 0, 0); 3184 3185 return 0; 3186 } 3187 3188 static void btf_struct_log(struct btf_verifier_env *env, 3189 const struct btf_type *t) 3190 { 3191 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3192 } 3193 3194 enum btf_field_type { 3195 BTF_FIELD_SPIN_LOCK, 3196 BTF_FIELD_TIMER, 3197 BTF_FIELD_KPTR, 3198 }; 3199 3200 enum { 3201 BTF_FIELD_IGNORE = 0, 3202 BTF_FIELD_FOUND = 1, 3203 }; 3204 3205 struct btf_field_info { 3206 u32 type_id; 3207 u32 off; 3208 enum bpf_kptr_type type; 3209 }; 3210 3211 static int btf_find_struct(const struct btf *btf, const struct btf_type *t, 3212 u32 off, int sz, struct btf_field_info *info) 3213 { 3214 if (!__btf_type_is_struct(t)) 3215 return BTF_FIELD_IGNORE; 3216 if (t->size != sz) 3217 return BTF_FIELD_IGNORE; 3218 info->off = off; 3219 return BTF_FIELD_FOUND; 3220 } 3221 3222 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, 3223 u32 off, int sz, struct btf_field_info *info) 3224 { 3225 enum bpf_kptr_type type; 3226 u32 res_id; 3227 3228 /* For PTR, sz is always == 8 */ 3229 if (!btf_type_is_ptr(t)) 3230 return BTF_FIELD_IGNORE; 3231 t = btf_type_by_id(btf, t->type); 3232 3233 if (!btf_type_is_type_tag(t)) 3234 return BTF_FIELD_IGNORE; 3235 /* Reject extra tags */ 3236 if (btf_type_is_type_tag(btf_type_by_id(btf, t->type))) 3237 return -EINVAL; 3238 if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off))) 3239 type = BPF_KPTR_UNREF; 3240 else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off))) 3241 type = BPF_KPTR_REF; 3242 else 3243 return -EINVAL; 3244 3245 /* Get the base type */ 3246 t = 
btf_type_skip_modifiers(btf, t->type, &res_id); 3247 /* Only pointer to struct is allowed */ 3248 if (!__btf_type_is_struct(t)) 3249 return -EINVAL; 3250 3251 info->type_id = res_id; 3252 info->off = off; 3253 info->type = type; 3254 return BTF_FIELD_FOUND; 3255 } 3256 3257 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t, 3258 const char *name, int sz, int align, 3259 enum btf_field_type field_type, 3260 struct btf_field_info *info, int info_cnt) 3261 { 3262 const struct btf_member *member; 3263 struct btf_field_info tmp; 3264 int ret, idx = 0; 3265 u32 i, off; 3266 3267 for_each_member(i, t, member) { 3268 const struct btf_type *member_type = btf_type_by_id(btf, 3269 member->type); 3270 3271 if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name)) 3272 continue; 3273 3274 off = __btf_member_bit_offset(t, member); 3275 if (off % 8) 3276 /* valid C code cannot generate such BTF */ 3277 return -EINVAL; 3278 off /= 8; 3279 if (off % align) 3280 return -EINVAL; 3281 3282 switch (field_type) { 3283 case BTF_FIELD_SPIN_LOCK: 3284 case BTF_FIELD_TIMER: 3285 ret = btf_find_struct(btf, member_type, off, sz, 3286 idx < info_cnt ? &info[idx] : &tmp); 3287 if (ret < 0) 3288 return ret; 3289 break; 3290 case BTF_FIELD_KPTR: 3291 ret = btf_find_kptr(btf, member_type, off, sz, 3292 idx < info_cnt ? &info[idx] : &tmp); 3293 if (ret < 0) 3294 return ret; 3295 break; 3296 default: 3297 return -EFAULT; 3298 } 3299 3300 if (ret == BTF_FIELD_IGNORE) 3301 continue; 3302 if (idx >= info_cnt) 3303 return -E2BIG; 3304 ++idx; 3305 } 3306 return idx; 3307 } 3308 3309 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, 3310 const char *name, int sz, int align, 3311 enum btf_field_type field_type, 3312 struct btf_field_info *info, int info_cnt) 3313 { 3314 const struct btf_var_secinfo *vsi; 3315 struct btf_field_info tmp; 3316 int ret, idx = 0; 3317 u32 i, off; 3318 3319 for_each_vsi(i, t, vsi) { 3320 const struct btf_type *var = btf_type_by_id(btf, vsi->type); 3321 const struct btf_type *var_type = btf_type_by_id(btf, var->type); 3322 3323 off = vsi->offset; 3324 3325 if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name)) 3326 continue; 3327 if (vsi->size != sz) 3328 continue; 3329 if (off % align) 3330 return -EINVAL; 3331 3332 switch (field_type) { 3333 case BTF_FIELD_SPIN_LOCK: 3334 case BTF_FIELD_TIMER: 3335 ret = btf_find_struct(btf, var_type, off, sz, 3336 idx < info_cnt ? &info[idx] : &tmp); 3337 if (ret < 0) 3338 return ret; 3339 break; 3340 case BTF_FIELD_KPTR: 3341 ret = btf_find_kptr(btf, var_type, off, sz, 3342 idx < info_cnt ? 
&info[idx] : &tmp); 3343 if (ret < 0) 3344 return ret; 3345 break; 3346 default: 3347 return -EFAULT; 3348 } 3349 3350 if (ret == BTF_FIELD_IGNORE) 3351 continue; 3352 if (idx >= info_cnt) 3353 return -E2BIG; 3354 ++idx; 3355 } 3356 return idx; 3357 } 3358 3359 static int btf_find_field(const struct btf *btf, const struct btf_type *t, 3360 enum btf_field_type field_type, 3361 struct btf_field_info *info, int info_cnt) 3362 { 3363 const char *name; 3364 int sz, align; 3365 3366 switch (field_type) { 3367 case BTF_FIELD_SPIN_LOCK: 3368 name = "bpf_spin_lock"; 3369 sz = sizeof(struct bpf_spin_lock); 3370 align = __alignof__(struct bpf_spin_lock); 3371 break; 3372 case BTF_FIELD_TIMER: 3373 name = "bpf_timer"; 3374 sz = sizeof(struct bpf_timer); 3375 align = __alignof__(struct bpf_timer); 3376 break; 3377 case BTF_FIELD_KPTR: 3378 name = NULL; 3379 sz = sizeof(u64); 3380 align = 8; 3381 break; 3382 default: 3383 return -EFAULT; 3384 } 3385 3386 if (__btf_type_is_struct(t)) 3387 return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt); 3388 else if (btf_type_is_datasec(t)) 3389 return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt); 3390 return -EINVAL; 3391 } 3392 3393 /* find 'struct bpf_spin_lock' in map value. 3394 * return >= 0 offset if found 3395 * and < 0 in case of error 3396 */ 3397 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) 3398 { 3399 struct btf_field_info info; 3400 int ret; 3401 3402 ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1); 3403 if (ret < 0) 3404 return ret; 3405 if (!ret) 3406 return -ENOENT; 3407 return info.off; 3408 } 3409 3410 int btf_find_timer(const struct btf *btf, const struct btf_type *t) 3411 { 3412 struct btf_field_info info; 3413 int ret; 3414 3415 ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1); 3416 if (ret < 0) 3417 return ret; 3418 if (!ret) 3419 return -ENOENT; 3420 return info.off; 3421 } 3422 3423 struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf, 3424 const struct btf_type *t) 3425 { 3426 struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX]; 3427 struct bpf_map_value_off *tab; 3428 struct btf *kernel_btf = NULL; 3429 struct module *mod = NULL; 3430 int ret, i, nr_off; 3431 3432 ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr)); 3433 if (ret < 0) 3434 return ERR_PTR(ret); 3435 if (!ret) 3436 return NULL; 3437 3438 nr_off = ret; 3439 tab = kzalloc(offsetof(struct bpf_map_value_off, off[nr_off]), GFP_KERNEL | __GFP_NOWARN); 3440 if (!tab) 3441 return ERR_PTR(-ENOMEM); 3442 3443 for (i = 0; i < nr_off; i++) { 3444 const struct btf_type *t; 3445 s32 id; 3446 3447 /* Find type in map BTF, and use it to look up the matching type 3448 * in vmlinux or module BTFs, by name and kind. 3449 */ 3450 t = btf_type_by_id(btf, info_arr[i].type_id); 3451 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info), 3452 &kernel_btf); 3453 if (id < 0) { 3454 ret = id; 3455 goto end; 3456 } 3457 3458 /* Find and stash the function pointer for the destruction function that 3459 * needs to be eventually invoked from the map free path. 3460 */ 3461 if (info_arr[i].type == BPF_KPTR_REF) { 3462 const struct btf_type *dtor_func; 3463 const char *dtor_func_name; 3464 unsigned long addr; 3465 s32 dtor_btf_id; 3466 3467 /* This call also serves as a whitelist of allowed objects that 3468 * can be used as a referenced pointer and be stored in a map at 3469 * the same time. 
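	 *
	 * E.g. (illustrative; struct foo and its destructor are
	 * hypothetical) a map value declared as
	 *
	 *	struct map_value {
	 *		struct foo __kptr_ref *ptr;
	 *	};
	 *
	 * (__kptr_ref standing for the "kptr_ref" type tag matched
	 * in btf_find_kptr() above) is only accepted if a destructor
	 * kfunc for struct foo was registered via
	 * register_btf_id_dtor_kfuncs(); otherwise
	 * btf_find_dtor_kfunc() fails and kptr parsing errors out.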
3470 */ 3471 dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id); 3472 if (dtor_btf_id < 0) { 3473 ret = dtor_btf_id; 3474 goto end_btf; 3475 } 3476 3477 dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id); 3478 if (!dtor_func) { 3479 ret = -ENOENT; 3480 goto end_btf; 3481 } 3482 3483 if (btf_is_module(kernel_btf)) { 3484 mod = btf_try_get_module(kernel_btf); 3485 if (!mod) { 3486 ret = -ENXIO; 3487 goto end_btf; 3488 } 3489 } 3490 3491 /* We already verified dtor_func to be btf_type_is_func 3492 * in register_btf_id_dtor_kfuncs. 3493 */ 3494 dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off); 3495 addr = kallsyms_lookup_name(dtor_func_name); 3496 if (!addr) { 3497 ret = -EINVAL; 3498 goto end_mod; 3499 } 3500 tab->off[i].kptr.dtor = (void *)addr; 3501 } 3502 3503 tab->off[i].offset = info_arr[i].off; 3504 tab->off[i].type = info_arr[i].type; 3505 tab->off[i].kptr.btf_id = id; 3506 tab->off[i].kptr.btf = kernel_btf; 3507 tab->off[i].kptr.module = mod; 3508 } 3509 tab->nr_off = nr_off; 3510 return tab; 3511 end_mod: 3512 module_put(mod); 3513 end_btf: 3514 btf_put(kernel_btf); 3515 end: 3516 while (i--) { 3517 btf_put(tab->off[i].kptr.btf); 3518 if (tab->off[i].kptr.module) 3519 module_put(tab->off[i].kptr.module); 3520 } 3521 kfree(tab); 3522 return ERR_PTR(ret); 3523 } 3524 3525 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, 3526 u32 type_id, void *data, u8 bits_offset, 3527 struct btf_show *show) 3528 { 3529 const struct btf_member *member; 3530 void *safe_data; 3531 u32 i; 3532 3533 safe_data = btf_show_start_struct_type(show, t, type_id, data); 3534 if (!safe_data) 3535 return; 3536 3537 for_each_member(i, t, member) { 3538 const struct btf_type *member_type = btf_type_by_id(btf, 3539 member->type); 3540 const struct btf_kind_operations *ops; 3541 u32 member_offset, bitfield_size; 3542 u32 bytes_offset; 3543 u8 bits8_offset; 3544 3545 btf_show_start_member(show, member); 3546 3547 member_offset = __btf_member_bit_offset(t, member); 3548 bitfield_size = __btf_member_bitfield_size(t, member); 3549 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); 3550 bits8_offset = BITS_PER_BYTE_MASKED(member_offset); 3551 if (bitfield_size) { 3552 safe_data = btf_show_start_type(show, member_type, 3553 member->type, 3554 data + bytes_offset); 3555 if (safe_data) 3556 btf_bitfield_show(safe_data, 3557 bits8_offset, 3558 bitfield_size, show); 3559 btf_show_end_type(show); 3560 } else { 3561 ops = btf_type_ops(member_type); 3562 ops->show(btf, member_type, member->type, 3563 data + bytes_offset, bits8_offset, show); 3564 } 3565 3566 btf_show_end_member(show); 3567 } 3568 3569 btf_show_end_struct_type(show); 3570 } 3571 3572 static void btf_struct_show(const struct btf *btf, const struct btf_type *t, 3573 u32 type_id, void *data, u8 bits_offset, 3574 struct btf_show *show) 3575 { 3576 const struct btf_member *m = show->state.member; 3577 3578 /* 3579 * First check if any members would be shown (are non-zero). 3580 * See comments above "struct btf_show" definition for more 3581 * details on how this works at a high-level. 
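	 *
	 * In short: the first __btf_struct_show() call below runs
	 * with depth_check set and only records, via depth_to_show,
	 * whether any non-zero member exists; only if one was found
	 * does the second call actually emit output.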
3582 */ 3583 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { 3584 if (!show->state.depth_check) { 3585 show->state.depth_check = show->state.depth + 1; 3586 show->state.depth_to_show = 0; 3587 } 3588 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3589 /* Restore saved member data here */ 3590 show->state.member = m; 3591 if (show->state.depth_check != show->state.depth + 1) 3592 return; 3593 show->state.depth_check = 0; 3594 3595 if (show->state.depth_to_show <= show->state.depth) 3596 return; 3597 /* 3598 * Reaching here indicates we have recursed and found 3599 * non-zero child values. 3600 */ 3601 } 3602 3603 __btf_struct_show(btf, t, type_id, data, bits_offset, show); 3604 } 3605 3606 static struct btf_kind_operations struct_ops = { 3607 .check_meta = btf_struct_check_meta, 3608 .resolve = btf_struct_resolve, 3609 .check_member = btf_struct_check_member, 3610 .check_kflag_member = btf_generic_check_kflag_member, 3611 .log_details = btf_struct_log, 3612 .show = btf_struct_show, 3613 }; 3614 3615 static int btf_enum_check_member(struct btf_verifier_env *env, 3616 const struct btf_type *struct_type, 3617 const struct btf_member *member, 3618 const struct btf_type *member_type) 3619 { 3620 u32 struct_bits_off = member->offset; 3621 u32 struct_size, bytes_offset; 3622 3623 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3624 btf_verifier_log_member(env, struct_type, member, 3625 "Member is not byte aligned"); 3626 return -EINVAL; 3627 } 3628 3629 struct_size = struct_type->size; 3630 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); 3631 if (struct_size - bytes_offset < member_type->size) { 3632 btf_verifier_log_member(env, struct_type, member, 3633 "Member exceeds struct_size"); 3634 return -EINVAL; 3635 } 3636 3637 return 0; 3638 } 3639 3640 static int btf_enum_check_kflag_member(struct btf_verifier_env *env, 3641 const struct btf_type *struct_type, 3642 const struct btf_member *member, 3643 const struct btf_type *member_type) 3644 { 3645 u32 struct_bits_off, nr_bits, bytes_end, struct_size; 3646 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; 3647 3648 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); 3649 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); 3650 if (!nr_bits) { 3651 if (BITS_PER_BYTE_MASKED(struct_bits_off)) { 3652 btf_verifier_log_member(env, struct_type, member, 3653 "Member is not byte aligned"); 3654 return -EINVAL; 3655 } 3656 3657 nr_bits = int_bitsize; 3658 } else if (nr_bits > int_bitsize) { 3659 btf_verifier_log_member(env, struct_type, member, 3660 "Invalid member bitfield_size"); 3661 return -EINVAL; 3662 } 3663 3664 struct_size = struct_type->size; 3665 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); 3666 if (struct_size < bytes_end) { 3667 btf_verifier_log_member(env, struct_type, member, 3668 "Member exceeds struct_size"); 3669 return -EINVAL; 3670 } 3671 3672 return 0; 3673 } 3674 3675 static s32 btf_enum_check_meta(struct btf_verifier_env *env, 3676 const struct btf_type *t, 3677 u32 meta_left) 3678 { 3679 const struct btf_enum *enums = btf_type_enum(t); 3680 struct btf *btf = env->btf; 3681 const char *fmt_str; 3682 u16 i, nr_enums; 3683 u32 meta_needed; 3684 3685 nr_enums = btf_type_vlen(t); 3686 meta_needed = nr_enums * sizeof(*enums); 3687 3688 if (meta_left < meta_needed) { 3689 btf_verifier_log_basic(env, t, 3690 "meta_left:%u meta_needed:%u", 3691 meta_left, meta_needed); 3692 return -EINVAL; 3693 } 3694 3695 if (t->size > 8 || !is_power_of_2(t->size)) { 3696 btf_verifier_log_type(env, t, "Unexpected 
size"); 3697 return -EINVAL; 3698 } 3699 3700 /* enum type either no name or a valid one */ 3701 if (t->name_off && 3702 !btf_name_valid_identifier(env->btf, t->name_off)) { 3703 btf_verifier_log_type(env, t, "Invalid name"); 3704 return -EINVAL; 3705 } 3706 3707 btf_verifier_log_type(env, t, NULL); 3708 3709 for (i = 0; i < nr_enums; i++) { 3710 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 3711 btf_verifier_log(env, "\tInvalid name_offset:%u", 3712 enums[i].name_off); 3713 return -EINVAL; 3714 } 3715 3716 /* enum member must have a valid name */ 3717 if (!enums[i].name_off || 3718 !btf_name_valid_identifier(btf, enums[i].name_off)) { 3719 btf_verifier_log_type(env, t, "Invalid name"); 3720 return -EINVAL; 3721 } 3722 3723 if (env->log.level == BPF_LOG_KERNEL) 3724 continue; 3725 fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n"; 3726 btf_verifier_log(env, fmt_str, 3727 __btf_name_by_offset(btf, enums[i].name_off), 3728 enums[i].val); 3729 } 3730 3731 return meta_needed; 3732 } 3733 3734 static void btf_enum_log(struct btf_verifier_env *env, 3735 const struct btf_type *t) 3736 { 3737 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 3738 } 3739 3740 static void btf_enum_show(const struct btf *btf, const struct btf_type *t, 3741 u32 type_id, void *data, u8 bits_offset, 3742 struct btf_show *show) 3743 { 3744 const struct btf_enum *enums = btf_type_enum(t); 3745 u32 i, nr_enums = btf_type_vlen(t); 3746 void *safe_data; 3747 int v; 3748 3749 safe_data = btf_show_start_type(show, t, type_id, data); 3750 if (!safe_data) 3751 return; 3752 3753 v = *(int *)safe_data; 3754 3755 for (i = 0; i < nr_enums; i++) { 3756 if (v != enums[i].val) 3757 continue; 3758 3759 btf_show_type_value(show, "%s", 3760 __btf_name_by_offset(btf, 3761 enums[i].name_off)); 3762 3763 btf_show_end_type(show); 3764 return; 3765 } 3766 3767 if (btf_type_kflag(t)) 3768 btf_show_type_value(show, "%d", v); 3769 else 3770 btf_show_type_value(show, "%u", v); 3771 btf_show_end_type(show); 3772 } 3773 3774 static struct btf_kind_operations enum_ops = { 3775 .check_meta = btf_enum_check_meta, 3776 .resolve = btf_df_resolve, 3777 .check_member = btf_enum_check_member, 3778 .check_kflag_member = btf_enum_check_kflag_member, 3779 .log_details = btf_enum_log, 3780 .show = btf_enum_show, 3781 }; 3782 3783 static s32 btf_enum64_check_meta(struct btf_verifier_env *env, 3784 const struct btf_type *t, 3785 u32 meta_left) 3786 { 3787 const struct btf_enum64 *enums = btf_type_enum64(t); 3788 struct btf *btf = env->btf; 3789 const char *fmt_str; 3790 u16 i, nr_enums; 3791 u32 meta_needed; 3792 3793 nr_enums = btf_type_vlen(t); 3794 meta_needed = nr_enums * sizeof(*enums); 3795 3796 if (meta_left < meta_needed) { 3797 btf_verifier_log_basic(env, t, 3798 "meta_left:%u meta_needed:%u", 3799 meta_left, meta_needed); 3800 return -EINVAL; 3801 } 3802 3803 if (t->size > 8 || !is_power_of_2(t->size)) { 3804 btf_verifier_log_type(env, t, "Unexpected size"); 3805 return -EINVAL; 3806 } 3807 3808 /* enum type either no name or a valid one */ 3809 if (t->name_off && 3810 !btf_name_valid_identifier(env->btf, t->name_off)) { 3811 btf_verifier_log_type(env, t, "Invalid name"); 3812 return -EINVAL; 3813 } 3814 3815 btf_verifier_log_type(env, t, NULL); 3816 3817 for (i = 0; i < nr_enums; i++) { 3818 if (!btf_name_offset_valid(btf, enums[i].name_off)) { 3819 btf_verifier_log(env, "\tInvalid name_offset:%u", 3820 enums[i].name_off); 3821 return -EINVAL; 3822 } 3823 3824 /* enum member must have a valid name */ 3825 if 
(!enums[i].name_off || 3826 !btf_name_valid_identifier(btf, enums[i].name_off)) { 3827 btf_verifier_log_type(env, t, "Invalid name"); 3828 return -EINVAL; 3829 } 3830 3831 if (env->log.level == BPF_LOG_KERNEL) 3832 continue; 3833 3834 fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n"; 3835 btf_verifier_log(env, fmt_str, 3836 __btf_name_by_offset(btf, enums[i].name_off), 3837 btf_enum64_value(enums + i)); 3838 } 3839 3840 return meta_needed; 3841 } 3842 3843 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t, 3844 u32 type_id, void *data, u8 bits_offset, 3845 struct btf_show *show) 3846 { 3847 const struct btf_enum64 *enums = btf_type_enum64(t); 3848 u32 i, nr_enums = btf_type_vlen(t); 3849 void *safe_data; 3850 s64 v; 3851 3852 safe_data = btf_show_start_type(show, t, type_id, data); 3853 if (!safe_data) 3854 return; 3855 3856 v = *(u64 *)safe_data; 3857 3858 for (i = 0; i < nr_enums; i++) { 3859 if (v != btf_enum64_value(enums + i)) 3860 continue; 3861 3862 btf_show_type_value(show, "%s", 3863 __btf_name_by_offset(btf, 3864 enums[i].name_off)); 3865 3866 btf_show_end_type(show); 3867 return; 3868 } 3869 3870 if (btf_type_kflag(t)) 3871 btf_show_type_value(show, "%lld", v); 3872 else 3873 btf_show_type_value(show, "%llu", v); 3874 btf_show_end_type(show); 3875 } 3876 3877 static struct btf_kind_operations enum64_ops = { 3878 .check_meta = btf_enum64_check_meta, 3879 .resolve = btf_df_resolve, 3880 .check_member = btf_enum_check_member, 3881 .check_kflag_member = btf_enum_check_kflag_member, 3882 .log_details = btf_enum_log, 3883 .show = btf_enum64_show, 3884 }; 3885 3886 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, 3887 const struct btf_type *t, 3888 u32 meta_left) 3889 { 3890 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); 3891 3892 if (meta_left < meta_needed) { 3893 btf_verifier_log_basic(env, t, 3894 "meta_left:%u meta_needed:%u", 3895 meta_left, meta_needed); 3896 return -EINVAL; 3897 } 3898 3899 if (t->name_off) { 3900 btf_verifier_log_type(env, t, "Invalid name"); 3901 return -EINVAL; 3902 } 3903 3904 if (btf_type_kflag(t)) { 3905 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3906 return -EINVAL; 3907 } 3908 3909 btf_verifier_log_type(env, t, NULL); 3910 3911 return meta_needed; 3912 } 3913 3914 static void btf_func_proto_log(struct btf_verifier_env *env, 3915 const struct btf_type *t) 3916 { 3917 const struct btf_param *args = (const struct btf_param *)(t + 1); 3918 u16 nr_args = btf_type_vlen(t), i; 3919 3920 btf_verifier_log(env, "return=%u args=(", t->type); 3921 if (!nr_args) { 3922 btf_verifier_log(env, "void"); 3923 goto done; 3924 } 3925 3926 if (nr_args == 1 && !args[0].type) { 3927 /* Only one vararg */ 3928 btf_verifier_log(env, "vararg"); 3929 goto done; 3930 } 3931 3932 btf_verifier_log(env, "%u %s", args[0].type, 3933 __btf_name_by_offset(env->btf, 3934 args[0].name_off)); 3935 for (i = 1; i < nr_args - 1; i++) 3936 btf_verifier_log(env, ", %u %s", args[i].type, 3937 __btf_name_by_offset(env->btf, 3938 args[i].name_off)); 3939 3940 if (nr_args > 1) { 3941 const struct btf_param *last_arg = &args[nr_args - 1]; 3942 3943 if (last_arg->type) 3944 btf_verifier_log(env, ", %u %s", last_arg->type, 3945 __btf_name_by_offset(env->btf, 3946 last_arg->name_off)); 3947 else 3948 btf_verifier_log(env, ", vararg"); 3949 } 3950 3951 done: 3952 btf_verifier_log(env, ")"); 3953 } 3954 3955 static struct btf_kind_operations func_proto_ops = { 3956 .check_meta = btf_func_proto_check_meta, 3957 
.resolve = btf_df_resolve,
3958 /*
3959 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
3960 * a struct's member.
3961 *
3962 * It should be a function pointer instead.
3963 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
3964 *
3965 * Hence, there is no btf_func_check_member().
3966 */
3967 .check_member = btf_df_check_member,
3968 .check_kflag_member = btf_df_check_kflag_member,
3969 .log_details = btf_func_proto_log,
3970 .show = btf_df_show,
3971 };
3972
3973 static s32 btf_func_check_meta(struct btf_verifier_env *env,
3974 const struct btf_type *t,
3975 u32 meta_left)
3976 {
3977 if (!t->name_off ||
3978 !btf_name_valid_identifier(env->btf, t->name_off)) {
3979 btf_verifier_log_type(env, t, "Invalid name");
3980 return -EINVAL;
3981 }
3982
3983 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
3984 btf_verifier_log_type(env, t, "Invalid func linkage");
3985 return -EINVAL;
3986 }
3987
3988 if (btf_type_kflag(t)) {
3989 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3990 return -EINVAL;
3991 }
3992
3993 btf_verifier_log_type(env, t, NULL);
3994
3995 return 0;
3996 }
3997
3998 static int btf_func_resolve(struct btf_verifier_env *env,
3999 const struct resolve_vertex *v)
4000 {
4001 const struct btf_type *t = v->t;
4002 u32 next_type_id = t->type;
4003 int err;
4004
4005 err = btf_func_check(env, t);
4006 if (err)
4007 return err;
4008
4009 env_stack_pop_resolved(env, next_type_id, 0);
4010 return 0;
4011 }
4012
4013 static struct btf_kind_operations func_ops = {
4014 .check_meta = btf_func_check_meta,
4015 .resolve = btf_func_resolve,
4016 .check_member = btf_df_check_member,
4017 .check_kflag_member = btf_df_check_kflag_member,
4018 .log_details = btf_ref_type_log,
4019 .show = btf_df_show,
4020 };
4021
4022 static s32 btf_var_check_meta(struct btf_verifier_env *env,
4023 const struct btf_type *t,
4024 u32 meta_left)
4025 {
4026 const struct btf_var *var;
4027 u32 meta_needed = sizeof(*var);
4028
4029 if (meta_left < meta_needed) {
4030 btf_verifier_log_basic(env, t,
4031 "meta_left:%u meta_needed:%u",
4032 meta_left, meta_needed);
4033 return -EINVAL;
4034 }
4035
4036 if (btf_type_vlen(t)) {
4037 btf_verifier_log_type(env, t, "vlen != 0");
4038 return -EINVAL;
4039 }
4040
4041 if (btf_type_kflag(t)) {
4042 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4043 return -EINVAL;
4044 }
4045
4046 if (!t->name_off ||
4047 !__btf_name_valid(env->btf, t->name_off, true)) {
4048 btf_verifier_log_type(env, t, "Invalid name");
4049 return -EINVAL;
4050 }
4051
4052 /* A var cannot be in type void */
4053 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4054 btf_verifier_log_type(env, t, "Invalid type_id");
4055 return -EINVAL;
4056 }
4057
4058 var = btf_type_var(t);
4059 if (var->linkage != BTF_VAR_STATIC &&
4060 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4061 btf_verifier_log_type(env, t, "Linkage not supported");
4062 return -EINVAL;
4063 }
4064
4065 btf_verifier_log_type(env, t, NULL);
4066
4067 return meta_needed;
4068 }
4069
4070 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4071 {
4072 const struct btf_var *var = btf_type_var(t);
4073
4074 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4075 }
4076
4077 static const struct btf_kind_operations var_ops = {
4078 .check_meta = btf_var_check_meta,
4079 .resolve = btf_var_resolve,
4080 .check_member = btf_df_check_member,
4081 .check_kflag_member = btf_df_check_kflag_member,
4082 .log_details = btf_var_log,
4083 .show = btf_var_show,
4084 };
4085
4086
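/* A BTF_KIND_DATASEC describes an ELF data section (e.g. ".data", ".bss"
 * or ".rodata") and is followed by one 'struct btf_var_secinfo' per
 * variable placed in that section. As an illustrative sketch (the type
 * ids and dump format below are made up), two globals
 *
 *	int g;
 *	static int s;
 *
 * could be encoded as
 *
 *	[4] VAR 'g' type_id=1, linkage=global-alloc
 *	[5] VAR 's' type_id=1, linkage=static
 *	[6] DATASEC '.bss' size=8 vlen=2
 *		type_id=4 offset=0 size=4
 *		type_id=5 offset=4 size=4
 *
 * btf_datasec_check_meta() only validates this fixed-size part: each
 * secinfo must name a valid type_id, and the (offset, size) pairs must
 * be in ascending order, non-overlapping and contained within t->size.
 * That the referred types really are VARs can only be checked in
 * pass #2, see btf_datasec_resolve().
 */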
static s32 btf_datasec_check_meta(struct btf_verifier_env *env, 4087 const struct btf_type *t, 4088 u32 meta_left) 4089 { 4090 const struct btf_var_secinfo *vsi; 4091 u64 last_vsi_end_off = 0, sum = 0; 4092 u32 i, meta_needed; 4093 4094 meta_needed = btf_type_vlen(t) * sizeof(*vsi); 4095 if (meta_left < meta_needed) { 4096 btf_verifier_log_basic(env, t, 4097 "meta_left:%u meta_needed:%u", 4098 meta_left, meta_needed); 4099 return -EINVAL; 4100 } 4101 4102 if (!t->size) { 4103 btf_verifier_log_type(env, t, "size == 0"); 4104 return -EINVAL; 4105 } 4106 4107 if (btf_type_kflag(t)) { 4108 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4109 return -EINVAL; 4110 } 4111 4112 if (!t->name_off || 4113 !btf_name_valid_section(env->btf, t->name_off)) { 4114 btf_verifier_log_type(env, t, "Invalid name"); 4115 return -EINVAL; 4116 } 4117 4118 btf_verifier_log_type(env, t, NULL); 4119 4120 for_each_vsi(i, t, vsi) { 4121 /* A var cannot be in type void */ 4122 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { 4123 btf_verifier_log_vsi(env, t, vsi, 4124 "Invalid type_id"); 4125 return -EINVAL; 4126 } 4127 4128 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) { 4129 btf_verifier_log_vsi(env, t, vsi, 4130 "Invalid offset"); 4131 return -EINVAL; 4132 } 4133 4134 if (!vsi->size || vsi->size > t->size) { 4135 btf_verifier_log_vsi(env, t, vsi, 4136 "Invalid size"); 4137 return -EINVAL; 4138 } 4139 4140 last_vsi_end_off = vsi->offset + vsi->size; 4141 if (last_vsi_end_off > t->size) { 4142 btf_verifier_log_vsi(env, t, vsi, 4143 "Invalid offset+size"); 4144 return -EINVAL; 4145 } 4146 4147 btf_verifier_log_vsi(env, t, vsi, NULL); 4148 sum += vsi->size; 4149 } 4150 4151 if (t->size < sum) { 4152 btf_verifier_log_type(env, t, "Invalid btf_info size"); 4153 return -EINVAL; 4154 } 4155 4156 return meta_needed; 4157 } 4158 4159 static int btf_datasec_resolve(struct btf_verifier_env *env, 4160 const struct resolve_vertex *v) 4161 { 4162 const struct btf_var_secinfo *vsi; 4163 struct btf *btf = env->btf; 4164 u16 i; 4165 4166 for_each_vsi_from(i, v->next_member, v->t, vsi) { 4167 u32 var_type_id = vsi->type, type_id, type_size = 0; 4168 const struct btf_type *var_type = btf_type_by_id(env->btf, 4169 var_type_id); 4170 if (!var_type || !btf_type_is_var(var_type)) { 4171 btf_verifier_log_vsi(env, v->t, vsi, 4172 "Not a VAR kind member"); 4173 return -EINVAL; 4174 } 4175 4176 if (!env_type_is_resolve_sink(env, var_type) && 4177 !env_type_is_resolved(env, var_type_id)) { 4178 env_stack_set_next_member(env, i + 1); 4179 return env_stack_push(env, var_type, var_type_id); 4180 } 4181 4182 type_id = var_type->type; 4183 if (!btf_type_id_size(btf, &type_id, &type_size)) { 4184 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type"); 4185 return -EINVAL; 4186 } 4187 4188 if (vsi->size < type_size) { 4189 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size"); 4190 return -EINVAL; 4191 } 4192 } 4193 4194 env_stack_pop_resolved(env, 0, 0); 4195 return 0; 4196 } 4197 4198 static void btf_datasec_log(struct btf_verifier_env *env, 4199 const struct btf_type *t) 4200 { 4201 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); 4202 } 4203 4204 static void btf_datasec_show(const struct btf *btf, 4205 const struct btf_type *t, u32 type_id, 4206 void *data, u8 bits_offset, 4207 struct btf_show *show) 4208 { 4209 const struct btf_var_secinfo *vsi; 4210 const struct btf_type *var; 4211 u32 i; 4212 4213 if (!btf_show_start_type(show, t, type_id, data)) 4214 return; 4215 4216 btf_show_type_value(show, 
"section (\"%s\") = {", 4217 __btf_name_by_offset(btf, t->name_off)); 4218 for_each_vsi(i, t, vsi) { 4219 var = btf_type_by_id(btf, vsi->type); 4220 if (i) 4221 btf_show(show, ","); 4222 btf_type_ops(var)->show(btf, var, vsi->type, 4223 data + vsi->offset, bits_offset, show); 4224 } 4225 btf_show_end_type(show); 4226 } 4227 4228 static const struct btf_kind_operations datasec_ops = { 4229 .check_meta = btf_datasec_check_meta, 4230 .resolve = btf_datasec_resolve, 4231 .check_member = btf_df_check_member, 4232 .check_kflag_member = btf_df_check_kflag_member, 4233 .log_details = btf_datasec_log, 4234 .show = btf_datasec_show, 4235 }; 4236 4237 static s32 btf_float_check_meta(struct btf_verifier_env *env, 4238 const struct btf_type *t, 4239 u32 meta_left) 4240 { 4241 if (btf_type_vlen(t)) { 4242 btf_verifier_log_type(env, t, "vlen != 0"); 4243 return -EINVAL; 4244 } 4245 4246 if (btf_type_kflag(t)) { 4247 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4248 return -EINVAL; 4249 } 4250 4251 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && 4252 t->size != 16) { 4253 btf_verifier_log_type(env, t, "Invalid type_size"); 4254 return -EINVAL; 4255 } 4256 4257 btf_verifier_log_type(env, t, NULL); 4258 4259 return 0; 4260 } 4261 4262 static int btf_float_check_member(struct btf_verifier_env *env, 4263 const struct btf_type *struct_type, 4264 const struct btf_member *member, 4265 const struct btf_type *member_type) 4266 { 4267 u64 start_offset_bytes; 4268 u64 end_offset_bytes; 4269 u64 misalign_bits; 4270 u64 align_bytes; 4271 u64 align_bits; 4272 4273 /* Different architectures have different alignment requirements, so 4274 * here we check only for the reasonable minimum. This way we ensure 4275 * that types after CO-RE can pass the kernel BTF verifier. 
4276 */ 4277 align_bytes = min_t(u64, sizeof(void *), member_type->size); 4278 align_bits = align_bytes * BITS_PER_BYTE; 4279 div64_u64_rem(member->offset, align_bits, &misalign_bits); 4280 if (misalign_bits) { 4281 btf_verifier_log_member(env, struct_type, member, 4282 "Member is not properly aligned"); 4283 return -EINVAL; 4284 } 4285 4286 start_offset_bytes = member->offset / BITS_PER_BYTE; 4287 end_offset_bytes = start_offset_bytes + member_type->size; 4288 if (end_offset_bytes > struct_type->size) { 4289 btf_verifier_log_member(env, struct_type, member, 4290 "Member exceeds struct_size"); 4291 return -EINVAL; 4292 } 4293 4294 return 0; 4295 } 4296 4297 static void btf_float_log(struct btf_verifier_env *env, 4298 const struct btf_type *t) 4299 { 4300 btf_verifier_log(env, "size=%u", t->size); 4301 } 4302 4303 static const struct btf_kind_operations float_ops = { 4304 .check_meta = btf_float_check_meta, 4305 .resolve = btf_df_resolve, 4306 .check_member = btf_float_check_member, 4307 .check_kflag_member = btf_generic_check_kflag_member, 4308 .log_details = btf_float_log, 4309 .show = btf_df_show, 4310 }; 4311 4312 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, 4313 const struct btf_type *t, 4314 u32 meta_left) 4315 { 4316 const struct btf_decl_tag *tag; 4317 u32 meta_needed = sizeof(*tag); 4318 s32 component_idx; 4319 const char *value; 4320 4321 if (meta_left < meta_needed) { 4322 btf_verifier_log_basic(env, t, 4323 "meta_left:%u meta_needed:%u", 4324 meta_left, meta_needed); 4325 return -EINVAL; 4326 } 4327 4328 value = btf_name_by_offset(env->btf, t->name_off); 4329 if (!value || !value[0]) { 4330 btf_verifier_log_type(env, t, "Invalid value"); 4331 return -EINVAL; 4332 } 4333 4334 if (btf_type_vlen(t)) { 4335 btf_verifier_log_type(env, t, "vlen != 0"); 4336 return -EINVAL; 4337 } 4338 4339 if (btf_type_kflag(t)) { 4340 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 4341 return -EINVAL; 4342 } 4343 4344 component_idx = btf_type_decl_tag(t)->component_idx; 4345 if (component_idx < -1) { 4346 btf_verifier_log_type(env, t, "Invalid component_idx"); 4347 return -EINVAL; 4348 } 4349 4350 btf_verifier_log_type(env, t, NULL); 4351 4352 return meta_needed; 4353 } 4354 4355 static int btf_decl_tag_resolve(struct btf_verifier_env *env, 4356 const struct resolve_vertex *v) 4357 { 4358 const struct btf_type *next_type; 4359 const struct btf_type *t = v->t; 4360 u32 next_type_id = t->type; 4361 struct btf *btf = env->btf; 4362 s32 component_idx; 4363 u32 vlen; 4364 4365 next_type = btf_type_by_id(btf, next_type_id); 4366 if (!next_type || !btf_type_is_decl_tag_target(next_type)) { 4367 btf_verifier_log_type(env, v->t, "Invalid type_id"); 4368 return -EINVAL; 4369 } 4370 4371 if (!env_type_is_resolve_sink(env, next_type) && 4372 !env_type_is_resolved(env, next_type_id)) 4373 return env_stack_push(env, next_type, next_type_id); 4374 4375 component_idx = btf_type_decl_tag(t)->component_idx; 4376 if (component_idx != -1) { 4377 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { 4378 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4379 return -EINVAL; 4380 } 4381 4382 if (btf_type_is_struct(next_type)) { 4383 vlen = btf_type_vlen(next_type); 4384 } else { 4385 /* next_type should be a function */ 4386 next_type = btf_type_by_id(btf, next_type->type); 4387 vlen = btf_type_vlen(next_type); 4388 } 4389 4390 if ((u32)component_idx >= vlen) { 4391 btf_verifier_log_type(env, v->t, "Invalid component_idx"); 4392 return -EINVAL; 4393 } 4394 } 4395 
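	/* All checks passed: the tag target exists and a non-negative
	 * component_idx falls within the target's members or, for a
	 * func, within its prototype's parameters. As an illustration
	 * (type ids are made up), the source-level annotation
	 *
	 *	struct s {
	 *		int a __attribute__((btf_decl_tag("tag_a")));
	 *	};
	 *
	 * is encoded as a DECL_TAG with name "tag_a", 'type' pointing
	 * to struct s and component_idx=0 (the index of member 'a').
	 */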
4396 env_stack_pop_resolved(env, next_type_id, 0); 4397 4398 return 0; 4399 } 4400 4401 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) 4402 { 4403 btf_verifier_log(env, "type=%u component_idx=%d", t->type, 4404 btf_type_decl_tag(t)->component_idx); 4405 } 4406 4407 static const struct btf_kind_operations decl_tag_ops = { 4408 .check_meta = btf_decl_tag_check_meta, 4409 .resolve = btf_decl_tag_resolve, 4410 .check_member = btf_df_check_member, 4411 .check_kflag_member = btf_df_check_kflag_member, 4412 .log_details = btf_decl_tag_log, 4413 .show = btf_df_show, 4414 }; 4415 4416 static int btf_func_proto_check(struct btf_verifier_env *env, 4417 const struct btf_type *t) 4418 { 4419 const struct btf_type *ret_type; 4420 const struct btf_param *args; 4421 const struct btf *btf; 4422 u16 nr_args, i; 4423 int err; 4424 4425 btf = env->btf; 4426 args = (const struct btf_param *)(t + 1); 4427 nr_args = btf_type_vlen(t); 4428 4429 /* Check func return type which could be "void" (t->type == 0) */ 4430 if (t->type) { 4431 u32 ret_type_id = t->type; 4432 4433 ret_type = btf_type_by_id(btf, ret_type_id); 4434 if (!ret_type) { 4435 btf_verifier_log_type(env, t, "Invalid return type"); 4436 return -EINVAL; 4437 } 4438 4439 if (btf_type_is_resolve_source_only(ret_type)) { 4440 btf_verifier_log_type(env, t, "Invalid return type"); 4441 return -EINVAL; 4442 } 4443 4444 if (btf_type_needs_resolve(ret_type) && 4445 !env_type_is_resolved(env, ret_type_id)) { 4446 err = btf_resolve(env, ret_type, ret_type_id); 4447 if (err) 4448 return err; 4449 } 4450 4451 /* Ensure the return type is a type that has a size */ 4452 if (!btf_type_id_size(btf, &ret_type_id, NULL)) { 4453 btf_verifier_log_type(env, t, "Invalid return type"); 4454 return -EINVAL; 4455 } 4456 } 4457 4458 if (!nr_args) 4459 return 0; 4460 4461 /* Last func arg type_id could be 0 if it is a vararg */ 4462 if (!args[nr_args - 1].type) { 4463 if (args[nr_args - 1].name_off) { 4464 btf_verifier_log_type(env, t, "Invalid arg#%u", 4465 nr_args); 4466 return -EINVAL; 4467 } 4468 nr_args--; 4469 } 4470 4471 err = 0; 4472 for (i = 0; i < nr_args; i++) { 4473 const struct btf_type *arg_type; 4474 u32 arg_type_id; 4475 4476 arg_type_id = args[i].type; 4477 arg_type = btf_type_by_id(btf, arg_type_id); 4478 if (!arg_type) { 4479 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4480 err = -EINVAL; 4481 break; 4482 } 4483 4484 if (args[i].name_off && 4485 (!btf_name_offset_valid(btf, args[i].name_off) || 4486 !btf_name_valid_identifier(btf, args[i].name_off))) { 4487 btf_verifier_log_type(env, t, 4488 "Invalid arg#%u", i + 1); 4489 err = -EINVAL; 4490 break; 4491 } 4492 4493 if (btf_type_needs_resolve(arg_type) && 4494 !env_type_is_resolved(env, arg_type_id)) { 4495 err = btf_resolve(env, arg_type, arg_type_id); 4496 if (err) 4497 break; 4498 } 4499 4500 if (!btf_type_id_size(btf, &arg_type_id, NULL)) { 4501 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4502 err = -EINVAL; 4503 break; 4504 } 4505 } 4506 4507 return err; 4508 } 4509 4510 static int btf_func_check(struct btf_verifier_env *env, 4511 const struct btf_type *t) 4512 { 4513 const struct btf_type *proto_type; 4514 const struct btf_param *args; 4515 const struct btf *btf; 4516 u16 nr_args, i; 4517 4518 btf = env->btf; 4519 proto_type = btf_type_by_id(btf, t->type); 4520 4521 if (!proto_type || !btf_type_is_func_proto(proto_type)) { 4522 btf_verifier_log_type(env, t, "Invalid type_id"); 4523 return -EINVAL; 4524 } 4525 4526 args = (const struct btf_param 
*)(proto_type + 1); 4527 nr_args = btf_type_vlen(proto_type); 4528 for (i = 0; i < nr_args; i++) { 4529 if (!args[i].name_off && args[i].type) { 4530 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); 4531 return -EINVAL; 4532 } 4533 } 4534 4535 return 0; 4536 } 4537 4538 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { 4539 [BTF_KIND_INT] = &int_ops, 4540 [BTF_KIND_PTR] = &ptr_ops, 4541 [BTF_KIND_ARRAY] = &array_ops, 4542 [BTF_KIND_STRUCT] = &struct_ops, 4543 [BTF_KIND_UNION] = &struct_ops, 4544 [BTF_KIND_ENUM] = &enum_ops, 4545 [BTF_KIND_FWD] = &fwd_ops, 4546 [BTF_KIND_TYPEDEF] = &modifier_ops, 4547 [BTF_KIND_VOLATILE] = &modifier_ops, 4548 [BTF_KIND_CONST] = &modifier_ops, 4549 [BTF_KIND_RESTRICT] = &modifier_ops, 4550 [BTF_KIND_FUNC] = &func_ops, 4551 [BTF_KIND_FUNC_PROTO] = &func_proto_ops, 4552 [BTF_KIND_VAR] = &var_ops, 4553 [BTF_KIND_DATASEC] = &datasec_ops, 4554 [BTF_KIND_FLOAT] = &float_ops, 4555 [BTF_KIND_DECL_TAG] = &decl_tag_ops, 4556 [BTF_KIND_TYPE_TAG] = &modifier_ops, 4557 [BTF_KIND_ENUM64] = &enum64_ops, 4558 }; 4559 4560 static s32 btf_check_meta(struct btf_verifier_env *env, 4561 const struct btf_type *t, 4562 u32 meta_left) 4563 { 4564 u32 saved_meta_left = meta_left; 4565 s32 var_meta_size; 4566 4567 if (meta_left < sizeof(*t)) { 4568 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu", 4569 env->log_type_id, meta_left, sizeof(*t)); 4570 return -EINVAL; 4571 } 4572 meta_left -= sizeof(*t); 4573 4574 if (t->info & ~BTF_INFO_MASK) { 4575 btf_verifier_log(env, "[%u] Invalid btf_info:%x", 4576 env->log_type_id, t->info); 4577 return -EINVAL; 4578 } 4579 4580 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || 4581 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { 4582 btf_verifier_log(env, "[%u] Invalid kind:%u", 4583 env->log_type_id, BTF_INFO_KIND(t->info)); 4584 return -EINVAL; 4585 } 4586 4587 if (!btf_name_offset_valid(env->btf, t->name_off)) { 4588 btf_verifier_log(env, "[%u] Invalid name_offset:%u", 4589 env->log_type_id, t->name_off); 4590 return -EINVAL; 4591 } 4592 4593 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); 4594 if (var_meta_size < 0) 4595 return var_meta_size; 4596 4597 meta_left -= var_meta_size; 4598 4599 return saved_meta_left - meta_left; 4600 } 4601 4602 static int btf_check_all_metas(struct btf_verifier_env *env) 4603 { 4604 struct btf *btf = env->btf; 4605 struct btf_header *hdr; 4606 void *cur, *end; 4607 4608 hdr = &btf->hdr; 4609 cur = btf->nohdr_data + hdr->type_off; 4610 end = cur + hdr->type_len; 4611 4612 env->log_type_id = btf->base_btf ? 
btf->start_id : 1; 4613 while (cur < end) { 4614 struct btf_type *t = cur; 4615 s32 meta_size; 4616 4617 meta_size = btf_check_meta(env, t, end - cur); 4618 if (meta_size < 0) 4619 return meta_size; 4620 4621 btf_add_type(env, t); 4622 cur += meta_size; 4623 env->log_type_id++; 4624 } 4625 4626 return 0; 4627 } 4628 4629 static bool btf_resolve_valid(struct btf_verifier_env *env, 4630 const struct btf_type *t, 4631 u32 type_id) 4632 { 4633 struct btf *btf = env->btf; 4634 4635 if (!env_type_is_resolved(env, type_id)) 4636 return false; 4637 4638 if (btf_type_is_struct(t) || btf_type_is_datasec(t)) 4639 return !btf_resolved_type_id(btf, type_id) && 4640 !btf_resolved_type_size(btf, type_id); 4641 4642 if (btf_type_is_decl_tag(t) || btf_type_is_func(t)) 4643 return btf_resolved_type_id(btf, type_id) && 4644 !btf_resolved_type_size(btf, type_id); 4645 4646 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || 4647 btf_type_is_var(t)) { 4648 t = btf_type_id_resolve(btf, &type_id); 4649 return t && 4650 !btf_type_is_modifier(t) && 4651 !btf_type_is_var(t) && 4652 !btf_type_is_datasec(t); 4653 } 4654 4655 if (btf_type_is_array(t)) { 4656 const struct btf_array *array = btf_type_array(t); 4657 const struct btf_type *elem_type; 4658 u32 elem_type_id = array->type; 4659 u32 elem_size; 4660 4661 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); 4662 return elem_type && !btf_type_is_modifier(elem_type) && 4663 (array->nelems * elem_size == 4664 btf_resolved_type_size(btf, type_id)); 4665 } 4666 4667 return false; 4668 } 4669 4670 static int btf_resolve(struct btf_verifier_env *env, 4671 const struct btf_type *t, u32 type_id) 4672 { 4673 u32 save_log_type_id = env->log_type_id; 4674 const struct resolve_vertex *v; 4675 int err = 0; 4676 4677 env->resolve_mode = RESOLVE_TBD; 4678 env_stack_push(env, t, type_id); 4679 while (!err && (v = env_stack_peak(env))) { 4680 env->log_type_id = v->type_id; 4681 err = btf_type_ops(v->t)->resolve(env, v); 4682 } 4683 4684 env->log_type_id = type_id; 4685 if (err == -E2BIG) { 4686 btf_verifier_log_type(env, t, 4687 "Exceeded max resolving depth:%u", 4688 MAX_RESOLVE_DEPTH); 4689 } else if (err == -EEXIST) { 4690 btf_verifier_log_type(env, t, "Loop detected"); 4691 } 4692 4693 /* Final sanity check */ 4694 if (!err && !btf_resolve_valid(env, t, type_id)) { 4695 btf_verifier_log_type(env, t, "Invalid resolve state"); 4696 err = -EINVAL; 4697 } 4698 4699 env->log_type_id = save_log_type_id; 4700 return err; 4701 } 4702 4703 static int btf_check_all_types(struct btf_verifier_env *env) 4704 { 4705 struct btf *btf = env->btf; 4706 const struct btf_type *t; 4707 u32 type_id, i; 4708 int err; 4709 4710 err = env_resolve_init(env); 4711 if (err) 4712 return err; 4713 4714 env->phase++; 4715 for (i = btf->base_btf ? 
0 : 1; i < btf->nr_types; i++) { 4716 type_id = btf->start_id + i; 4717 t = btf_type_by_id(btf, type_id); 4718 4719 env->log_type_id = type_id; 4720 if (btf_type_needs_resolve(t) && 4721 !env_type_is_resolved(env, type_id)) { 4722 err = btf_resolve(env, t, type_id); 4723 if (err) 4724 return err; 4725 } 4726 4727 if (btf_type_is_func_proto(t)) { 4728 err = btf_func_proto_check(env, t); 4729 if (err) 4730 return err; 4731 } 4732 } 4733 4734 return 0; 4735 } 4736 4737 static int btf_parse_type_sec(struct btf_verifier_env *env) 4738 { 4739 const struct btf_header *hdr = &env->btf->hdr; 4740 int err; 4741 4742 /* Type section must align to 4 bytes */ 4743 if (hdr->type_off & (sizeof(u32) - 1)) { 4744 btf_verifier_log(env, "Unaligned type_off"); 4745 return -EINVAL; 4746 } 4747 4748 if (!env->btf->base_btf && !hdr->type_len) { 4749 btf_verifier_log(env, "No type found"); 4750 return -EINVAL; 4751 } 4752 4753 err = btf_check_all_metas(env); 4754 if (err) 4755 return err; 4756 4757 return btf_check_all_types(env); 4758 } 4759 4760 static int btf_parse_str_sec(struct btf_verifier_env *env) 4761 { 4762 const struct btf_header *hdr; 4763 struct btf *btf = env->btf; 4764 const char *start, *end; 4765 4766 hdr = &btf->hdr; 4767 start = btf->nohdr_data + hdr->str_off; 4768 end = start + hdr->str_len; 4769 4770 if (end != btf->data + btf->data_size) { 4771 btf_verifier_log(env, "String section is not at the end"); 4772 return -EINVAL; 4773 } 4774 4775 btf->strings = start; 4776 4777 if (btf->base_btf && !hdr->str_len) 4778 return 0; 4779 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) { 4780 btf_verifier_log(env, "Invalid string section"); 4781 return -EINVAL; 4782 } 4783 if (!btf->base_btf && start[0]) { 4784 btf_verifier_log(env, "Invalid string section"); 4785 return -EINVAL; 4786 } 4787 4788 return 0; 4789 } 4790 4791 static const size_t btf_sec_info_offset[] = { 4792 offsetof(struct btf_header, type_off), 4793 offsetof(struct btf_header, str_off), 4794 }; 4795 4796 static int btf_sec_info_cmp(const void *a, const void *b) 4797 { 4798 const struct btf_sec_info *x = a; 4799 const struct btf_sec_info *y = b; 4800 4801 return (int)(x->off - y->off) ? 
: (int)(x->len - y->len); 4802 } 4803 4804 static int btf_check_sec_info(struct btf_verifier_env *env, 4805 u32 btf_data_size) 4806 { 4807 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; 4808 u32 total, expected_total, i; 4809 const struct btf_header *hdr; 4810 const struct btf *btf; 4811 4812 btf = env->btf; 4813 hdr = &btf->hdr; 4814 4815 /* Populate the secs from hdr */ 4816 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) 4817 secs[i] = *(struct btf_sec_info *)((void *)hdr + 4818 btf_sec_info_offset[i]); 4819 4820 sort(secs, ARRAY_SIZE(btf_sec_info_offset), 4821 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL); 4822 4823 /* Check for gaps and overlap among sections */ 4824 total = 0; 4825 expected_total = btf_data_size - hdr->hdr_len; 4826 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { 4827 if (expected_total < secs[i].off) { 4828 btf_verifier_log(env, "Invalid section offset"); 4829 return -EINVAL; 4830 } 4831 if (total < secs[i].off) { 4832 /* gap */ 4833 btf_verifier_log(env, "Unsupported section found"); 4834 return -EINVAL; 4835 } 4836 if (total > secs[i].off) { 4837 btf_verifier_log(env, "Section overlap found"); 4838 return -EINVAL; 4839 } 4840 if (expected_total - total < secs[i].len) { 4841 btf_verifier_log(env, 4842 "Total section length too long"); 4843 return -EINVAL; 4844 } 4845 total += secs[i].len; 4846 } 4847 4848 /* There is data other than hdr and known sections */ 4849 if (expected_total != total) { 4850 btf_verifier_log(env, "Unsupported section found"); 4851 return -EINVAL; 4852 } 4853 4854 return 0; 4855 } 4856 4857 static int btf_parse_hdr(struct btf_verifier_env *env) 4858 { 4859 u32 hdr_len, hdr_copy, btf_data_size; 4860 const struct btf_header *hdr; 4861 struct btf *btf; 4862 4863 btf = env->btf; 4864 btf_data_size = btf->data_size; 4865 4866 if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { 4867 btf_verifier_log(env, "hdr_len not found"); 4868 return -EINVAL; 4869 } 4870 4871 hdr = btf->data; 4872 hdr_len = hdr->hdr_len; 4873 if (btf_data_size < hdr_len) { 4874 btf_verifier_log(env, "btf_header not found"); 4875 return -EINVAL; 4876 } 4877 4878 /* Ensure the unsupported header fields are zero */ 4879 if (hdr_len > sizeof(btf->hdr)) { 4880 u8 *expected_zero = btf->data + sizeof(btf->hdr); 4881 u8 *end = btf->data + hdr_len; 4882 4883 for (; expected_zero < end; expected_zero++) { 4884 if (*expected_zero) { 4885 btf_verifier_log(env, "Unsupported btf_header"); 4886 return -E2BIG; 4887 } 4888 } 4889 } 4890 4891 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); 4892 memcpy(&btf->hdr, btf->data, hdr_copy); 4893 4894 hdr = &btf->hdr; 4895 4896 btf_verifier_log_hdr(env, btf_data_size); 4897 4898 if (hdr->magic != BTF_MAGIC) { 4899 btf_verifier_log(env, "Invalid magic"); 4900 return -EINVAL; 4901 } 4902 4903 if (hdr->version != BTF_VERSION) { 4904 btf_verifier_log(env, "Unsupported version"); 4905 return -ENOTSUPP; 4906 } 4907 4908 if (hdr->flags) { 4909 btf_verifier_log(env, "Unsupported flags"); 4910 return -ENOTSUPP; 4911 } 4912 4913 if (!btf->base_btf && btf_data_size == hdr->hdr_len) { 4914 btf_verifier_log(env, "No data"); 4915 return -EINVAL; 4916 } 4917 4918 return btf_check_sec_info(env, btf_data_size); 4919 } 4920 4921 static int btf_check_type_tags(struct btf_verifier_env *env, 4922 struct btf *btf, int start_id) 4923 { 4924 int i, n, good_id = start_id - 1; 4925 bool in_tags; 4926 4927 n = btf_nr_types(btf); 4928 for (i = start_id; i < n; i++) { 4929 const struct btf_type *t; 4930 int chain_limit = 32; 4931 u32 
cur_id = i; 4932 4933 t = btf_type_by_id(btf, i); 4934 if (!t) 4935 return -EINVAL; 4936 if (!btf_type_is_modifier(t)) 4937 continue; 4938 4939 cond_resched(); 4940 4941 in_tags = btf_type_is_type_tag(t); 4942 while (btf_type_is_modifier(t)) { 4943 if (!chain_limit--) { 4944 btf_verifier_log(env, "Max chain length or cycle detected"); 4945 return -ELOOP; 4946 } 4947 if (btf_type_is_type_tag(t)) { 4948 if (!in_tags) { 4949 btf_verifier_log(env, "Type tags don't precede modifiers"); 4950 return -EINVAL; 4951 } 4952 } else if (in_tags) { 4953 in_tags = false; 4954 } 4955 if (cur_id <= good_id) 4956 break; 4957 /* Move to next type */ 4958 cur_id = t->type; 4959 t = btf_type_by_id(btf, cur_id); 4960 if (!t) 4961 return -EINVAL; 4962 } 4963 good_id = i; 4964 } 4965 return 0; 4966 } 4967 4968 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, 4969 u32 log_level, char __user *log_ubuf, u32 log_size) 4970 { 4971 struct btf_verifier_env *env = NULL; 4972 struct bpf_verifier_log *log; 4973 struct btf *btf = NULL; 4974 u8 *data; 4975 int err; 4976 4977 if (btf_data_size > BTF_MAX_SIZE) 4978 return ERR_PTR(-E2BIG); 4979 4980 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 4981 if (!env) 4982 return ERR_PTR(-ENOMEM); 4983 4984 log = &env->log; 4985 if (log_level || log_ubuf || log_size) { 4986 /* user requested verbose verifier output 4987 * and supplied buffer to store the verification trace 4988 */ 4989 log->level = log_level; 4990 log->ubuf = log_ubuf; 4991 log->len_total = log_size; 4992 4993 /* log attributes have to be sane */ 4994 if (!bpf_verifier_log_attr_valid(log)) { 4995 err = -EINVAL; 4996 goto errout; 4997 } 4998 } 4999 5000 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5001 if (!btf) { 5002 err = -ENOMEM; 5003 goto errout; 5004 } 5005 env->btf = btf; 5006 5007 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); 5008 if (!data) { 5009 err = -ENOMEM; 5010 goto errout; 5011 } 5012 5013 btf->data = data; 5014 btf->data_size = btf_data_size; 5015 5016 if (copy_from_bpfptr(data, btf_data, btf_data_size)) { 5017 err = -EFAULT; 5018 goto errout; 5019 } 5020 5021 err = btf_parse_hdr(env); 5022 if (err) 5023 goto errout; 5024 5025 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5026 5027 err = btf_parse_str_sec(env); 5028 if (err) 5029 goto errout; 5030 5031 err = btf_parse_type_sec(env); 5032 if (err) 5033 goto errout; 5034 5035 err = btf_check_type_tags(env, btf, 1); 5036 if (err) 5037 goto errout; 5038 5039 if (log->level && bpf_verifier_log_full(log)) { 5040 err = -ENOSPC; 5041 goto errout; 5042 } 5043 5044 btf_verifier_env_free(env); 5045 refcount_set(&btf->refcnt, 1); 5046 return btf; 5047 5048 errout: 5049 btf_verifier_env_free(env); 5050 if (btf) 5051 btf_free(btf); 5052 return ERR_PTR(err); 5053 } 5054 5055 extern char __weak __start_BTF[]; 5056 extern char __weak __stop_BTF[]; 5057 extern struct btf *btf_vmlinux; 5058 5059 #define BPF_MAP_TYPE(_id, _ops) 5060 #define BPF_LINK_TYPE(_id, _name) 5061 static union { 5062 struct bpf_ctx_convert { 5063 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 5064 prog_ctx_type _id##_prog; \ 5065 kern_ctx_type _id##_kern; 5066 #include <linux/bpf_types.h> 5067 #undef BPF_PROG_TYPE 5068 } *__t; 5069 /* 't' is written once under lock. Read many times. 
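 * See btf_parse_vmlinux(), which assigns it while holding
 * bpf_verifier_lock.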
*/ 5070 const struct btf_type *t; 5071 } bpf_ctx_convert; 5072 enum { 5073 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 5074 __ctx_convert##_id, 5075 #include <linux/bpf_types.h> 5076 #undef BPF_PROG_TYPE 5077 __ctx_convert_unused, /* to avoid empty enum in extreme .config */ 5078 }; 5079 static u8 bpf_ctx_convert_map[] = { 5080 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 5081 [_id] = __ctx_convert##_id, 5082 #include <linux/bpf_types.h> 5083 #undef BPF_PROG_TYPE 5084 0, /* avoid empty array */ 5085 }; 5086 #undef BPF_MAP_TYPE 5087 #undef BPF_LINK_TYPE 5088 5089 static const struct btf_member * 5090 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, 5091 const struct btf_type *t, enum bpf_prog_type prog_type, 5092 int arg) 5093 { 5094 const struct btf_type *conv_struct; 5095 const struct btf_type *ctx_struct; 5096 const struct btf_member *ctx_type; 5097 const char *tname, *ctx_tname; 5098 5099 conv_struct = bpf_ctx_convert.t; 5100 if (!conv_struct) { 5101 bpf_log(log, "btf_vmlinux is malformed\n"); 5102 return NULL; 5103 } 5104 t = btf_type_by_id(btf, t->type); 5105 while (btf_type_is_modifier(t)) 5106 t = btf_type_by_id(btf, t->type); 5107 if (!btf_type_is_struct(t)) { 5108 /* Only pointer to struct is supported for now. 5109 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF 5110 * is not supported yet. 5111 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. 5112 */ 5113 return NULL; 5114 } 5115 tname = btf_name_by_offset(btf, t->name_off); 5116 if (!tname) { 5117 bpf_log(log, "arg#%d struct doesn't have a name\n", arg); 5118 return NULL; 5119 } 5120 /* prog_type is valid bpf program type. No need for bounds check. */ 5121 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2; 5122 /* ctx_struct is a pointer to prog_ctx_type in vmlinux. 5123 * Like 'struct __sk_buff' 5124 */ 5125 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); 5126 if (!ctx_struct) 5127 /* should not happen */ 5128 return NULL; 5129 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); 5130 if (!ctx_tname) { 5131 /* should not happen */ 5132 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n"); 5133 return NULL; 5134 } 5135 /* only compare that prog's ctx type name is the same as 5136 * kernel expects. No need to compare field by field. 
5137 * It's ok for bpf prog to do: 5138 * struct __sk_buff {}; 5139 * int socket_filter_bpf_prog(struct __sk_buff *skb) 5140 * { // no fields of skb are ever used } 5141 */ 5142 if (strcmp(ctx_tname, tname)) 5143 return NULL; 5144 return ctx_type; 5145 } 5146 5147 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, 5148 struct btf *btf, 5149 const struct btf_type *t, 5150 enum bpf_prog_type prog_type, 5151 int arg) 5152 { 5153 const struct btf_member *prog_ctx_type, *kern_ctx_type; 5154 5155 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg); 5156 if (!prog_ctx_type) 5157 return -ENOENT; 5158 kern_ctx_type = prog_ctx_type + 1; 5159 return kern_ctx_type->type; 5160 } 5161 5162 BTF_ID_LIST(bpf_ctx_convert_btf_id) 5163 BTF_ID(struct, bpf_ctx_convert) 5164 5165 struct btf *btf_parse_vmlinux(void) 5166 { 5167 struct btf_verifier_env *env = NULL; 5168 struct bpf_verifier_log *log; 5169 struct btf *btf = NULL; 5170 int err; 5171 5172 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 5173 if (!env) 5174 return ERR_PTR(-ENOMEM); 5175 5176 log = &env->log; 5177 log->level = BPF_LOG_KERNEL; 5178 5179 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5180 if (!btf) { 5181 err = -ENOMEM; 5182 goto errout; 5183 } 5184 env->btf = btf; 5185 5186 btf->data = __start_BTF; 5187 btf->data_size = __stop_BTF - __start_BTF; 5188 btf->kernel_btf = true; 5189 snprintf(btf->name, sizeof(btf->name), "vmlinux"); 5190 5191 err = btf_parse_hdr(env); 5192 if (err) 5193 goto errout; 5194 5195 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5196 5197 err = btf_parse_str_sec(env); 5198 if (err) 5199 goto errout; 5200 5201 err = btf_check_all_metas(env); 5202 if (err) 5203 goto errout; 5204 5205 err = btf_check_type_tags(env, btf, 1); 5206 if (err) 5207 goto errout; 5208 5209 /* btf_parse_vmlinux() runs under bpf_verifier_lock */ 5210 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); 5211 5212 bpf_struct_ops_init(btf, log); 5213 5214 refcount_set(&btf->refcnt, 1); 5215 5216 err = btf_alloc_id(btf); 5217 if (err) 5218 goto errout; 5219 5220 btf_verifier_env_free(env); 5221 return btf; 5222 5223 errout: 5224 btf_verifier_env_free(env); 5225 if (btf) { 5226 kvfree(btf->types); 5227 kfree(btf); 5228 } 5229 return ERR_PTR(err); 5230 } 5231 5232 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 5233 5234 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) 5235 { 5236 struct btf_verifier_env *env = NULL; 5237 struct bpf_verifier_log *log; 5238 struct btf *btf = NULL, *base_btf; 5239 int err; 5240 5241 base_btf = bpf_get_btf_vmlinux(); 5242 if (IS_ERR(base_btf)) 5243 return base_btf; 5244 if (!base_btf) 5245 return ERR_PTR(-EINVAL); 5246 5247 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); 5248 if (!env) 5249 return ERR_PTR(-ENOMEM); 5250 5251 log = &env->log; 5252 log->level = BPF_LOG_KERNEL; 5253 5254 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); 5255 if (!btf) { 5256 err = -ENOMEM; 5257 goto errout; 5258 } 5259 env->btf = btf; 5260 5261 btf->base_btf = base_btf; 5262 btf->start_id = base_btf->nr_types; 5263 btf->start_str_off = base_btf->hdr.str_len; 5264 btf->kernel_btf = true; 5265 snprintf(btf->name, sizeof(btf->name), "%s", module_name); 5266 5267 btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); 5268 if (!btf->data) { 5269 err = -ENOMEM; 5270 goto errout; 5271 } 5272 memcpy(btf->data, data, data_size); 5273 btf->data_size = data_size; 5274 5275 err = btf_parse_hdr(env); 5276 if (err) 5277 goto 
errout; 5278 5279 btf->nohdr_data = btf->data + btf->hdr.hdr_len; 5280 5281 err = btf_parse_str_sec(env); 5282 if (err) 5283 goto errout; 5284 5285 err = btf_check_all_metas(env); 5286 if (err) 5287 goto errout; 5288 5289 err = btf_check_type_tags(env, btf, btf_nr_types(base_btf)); 5290 if (err) 5291 goto errout; 5292 5293 btf_verifier_env_free(env); 5294 refcount_set(&btf->refcnt, 1); 5295 return btf; 5296 5297 errout: 5298 btf_verifier_env_free(env); 5299 if (btf) { 5300 kvfree(btf->data); 5301 kvfree(btf->types); 5302 kfree(btf); 5303 } 5304 return ERR_PTR(err); 5305 } 5306 5307 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ 5308 5309 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) 5310 { 5311 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 5312 5313 if (tgt_prog) 5314 return tgt_prog->aux->btf; 5315 else 5316 return prog->aux->attach_btf; 5317 } 5318 5319 static bool is_int_ptr(struct btf *btf, const struct btf_type *t) 5320 { 5321 /* t comes in already as a pointer */ 5322 t = btf_type_by_id(btf, t->type); 5323 5324 /* allow const */ 5325 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST) 5326 t = btf_type_by_id(btf, t->type); 5327 5328 return btf_type_is_int(t); 5329 } 5330 5331 static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, 5332 int off) 5333 { 5334 const struct btf_param *args; 5335 const struct btf_type *t; 5336 u32 offset = 0, nr_args; 5337 int i; 5338 5339 if (!func_proto) 5340 return off / 8; 5341 5342 nr_args = btf_type_vlen(func_proto); 5343 args = (const struct btf_param *)(func_proto + 1); 5344 for (i = 0; i < nr_args; i++) { 5345 t = btf_type_skip_modifiers(btf, args[i].type, NULL); 5346 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); 5347 if (off < offset) 5348 return i; 5349 } 5350 5351 t = btf_type_skip_modifiers(btf, func_proto->type, NULL); 5352 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); 5353 if (off < offset) 5354 return nr_args; 5355 5356 return nr_args + 1; 5357 } 5358 5359 bool btf_ctx_access(int off, int size, enum bpf_access_type type, 5360 const struct bpf_prog *prog, 5361 struct bpf_insn_access_aux *info) 5362 { 5363 const struct btf_type *t = prog->aux->attach_func_proto; 5364 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 5365 struct btf *btf = bpf_prog_get_target_btf(prog); 5366 const char *tname = prog->aux->attach_func_name; 5367 struct bpf_verifier_log *log = info->log; 5368 const struct btf_param *args; 5369 const char *tag_value; 5370 u32 nr_args, arg; 5371 int i, ret; 5372 5373 if (off % 8) { 5374 bpf_log(log, "func '%s' offset %d is not multiple of 8\n", 5375 tname, off); 5376 return false; 5377 } 5378 arg = get_ctx_arg_idx(btf, t, off); 5379 args = (const struct btf_param *)(t + 1); 5380 /* if (t == NULL) Fall back to default BPF prog with 5381 * MAX_BPF_FUNC_REG_ARGS u64 arguments. 5382 */ 5383 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; 5384 if (prog->aux->attach_btf_trace) { 5385 /* skip first 'void *__data' argument in btf_trace_##name typedef */ 5386 args++; 5387 nr_args--; 5388 } 5389 5390 if (arg > nr_args) { 5391 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5392 tname, arg + 1); 5393 return false; 5394 } 5395 5396 if (arg == nr_args) { 5397 switch (prog->expected_attach_type) { 5398 case BPF_LSM_CGROUP: 5399 case BPF_LSM_MAC: 5400 case BPF_TRACE_FEXIT: 5401 /* When LSM programs are attached to void LSM hooks 5402 * they use FEXIT trampolines and when attached to 5403 * int LSM hooks, they use MODIFY_RETURN trampolines. 
5404 * 5405 * While the LSM programs are BPF_MODIFY_RETURN-like 5406 * the check: 5407 * 5408 * if (ret_type != 'int') 5409 * return -EINVAL; 5410 * 5411 * is _not_ done here. This is still safe as LSM hooks 5412 * have only void and int return types. 5413 */ 5414 if (!t) 5415 return true; 5416 t = btf_type_by_id(btf, t->type); 5417 break; 5418 case BPF_MODIFY_RETURN: 5419 /* For now the BPF_MODIFY_RETURN can only be attached to 5420 * functions that return an int. 5421 */ 5422 if (!t) 5423 return false; 5424 5425 t = btf_type_skip_modifiers(btf, t->type, NULL); 5426 if (!btf_type_is_small_int(t)) { 5427 bpf_log(log, 5428 "ret type %s not allowed for fmod_ret\n", 5429 btf_type_str(t)); 5430 return false; 5431 } 5432 break; 5433 default: 5434 bpf_log(log, "func '%s' doesn't have %d-th argument\n", 5435 tname, arg + 1); 5436 return false; 5437 } 5438 } else { 5439 if (!t) 5440 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ 5441 return true; 5442 t = btf_type_by_id(btf, args[arg].type); 5443 } 5444 5445 /* skip modifiers */ 5446 while (btf_type_is_modifier(t)) 5447 t = btf_type_by_id(btf, t->type); 5448 if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) 5449 /* accessing a scalar */ 5450 return true; 5451 if (!btf_type_is_ptr(t)) { 5452 bpf_log(log, 5453 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", 5454 tname, arg, 5455 __btf_name_by_offset(btf, t->name_off), 5456 btf_type_str(t)); 5457 return false; 5458 } 5459 5460 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ 5461 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5462 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5463 u32 type, flag; 5464 5465 type = base_type(ctx_arg_info->reg_type); 5466 flag = type_flag(ctx_arg_info->reg_type); 5467 if (ctx_arg_info->offset == off && type == PTR_TO_BUF && 5468 (flag & PTR_MAYBE_NULL)) { 5469 info->reg_type = ctx_arg_info->reg_type; 5470 return true; 5471 } 5472 } 5473 5474 if (t->type == 0) 5475 /* This is a pointer to void. 5476 * It is the same as scalar from the verifier safety pov. 5477 * No further pointer walking is allowed. 
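 * (The value can still be passed around and compared like any
 * other scalar; it just cannot be dereferenced.)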
5478 */ 5479 return true; 5480 5481 if (is_int_ptr(btf, t)) 5482 return true; 5483 5484 /* this is a pointer to another type */ 5485 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { 5486 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; 5487 5488 if (ctx_arg_info->offset == off) { 5489 if (!ctx_arg_info->btf_id) { 5490 bpf_log(log,"invalid btf_id for context argument offset %u\n", off); 5491 return false; 5492 } 5493 5494 info->reg_type = ctx_arg_info->reg_type; 5495 info->btf = btf_vmlinux; 5496 info->btf_id = ctx_arg_info->btf_id; 5497 return true; 5498 } 5499 } 5500 5501 info->reg_type = PTR_TO_BTF_ID; 5502 if (tgt_prog) { 5503 enum bpf_prog_type tgt_type; 5504 5505 if (tgt_prog->type == BPF_PROG_TYPE_EXT) 5506 tgt_type = tgt_prog->aux->saved_dst_prog_type; 5507 else 5508 tgt_type = tgt_prog->type; 5509 5510 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg); 5511 if (ret > 0) { 5512 info->btf = btf_vmlinux; 5513 info->btf_id = ret; 5514 return true; 5515 } else { 5516 return false; 5517 } 5518 } 5519 5520 info->btf = btf; 5521 info->btf_id = t->type; 5522 t = btf_type_by_id(btf, t->type); 5523 5524 if (btf_type_is_type_tag(t)) { 5525 tag_value = __btf_name_by_offset(btf, t->name_off); 5526 if (strcmp(tag_value, "user") == 0) 5527 info->reg_type |= MEM_USER; 5528 if (strcmp(tag_value, "percpu") == 0) 5529 info->reg_type |= MEM_PERCPU; 5530 } 5531 5532 /* skip modifiers */ 5533 while (btf_type_is_modifier(t)) { 5534 info->btf_id = t->type; 5535 t = btf_type_by_id(btf, t->type); 5536 } 5537 if (!btf_type_is_struct(t)) { 5538 bpf_log(log, 5539 "func '%s' arg%d type %s is not a struct\n", 5540 tname, arg, btf_type_str(t)); 5541 return false; 5542 } 5543 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n", 5544 tname, arg, info->btf_id, btf_type_str(t), 5545 __btf_name_by_offset(btf, t->name_off)); 5546 return true; 5547 } 5548 5549 enum bpf_struct_walk_result { 5550 /* < 0 error */ 5551 WALK_SCALAR = 0, 5552 WALK_PTR, 5553 WALK_STRUCT, 5554 }; 5555 5556 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, 5557 const struct btf_type *t, int off, int size, 5558 u32 *next_btf_id, enum bpf_type_flag *flag) 5559 { 5560 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; 5561 const struct btf_type *mtype, *elem_type = NULL; 5562 const struct btf_member *member; 5563 const char *tname, *mname, *tag_value; 5564 u32 vlen, elem_id, mid; 5565 5566 again: 5567 tname = __btf_name_by_offset(btf, t->name_off); 5568 if (!btf_type_is_struct(t)) { 5569 bpf_log(log, "Type '%s' is not a struct\n", tname); 5570 return -EINVAL; 5571 } 5572 5573 vlen = btf_type_vlen(t); 5574 if (off + size > t->size) { 5575 /* If the last element is a variable size array, we may 5576 * need to relax the rule. 5577 */ 5578 struct btf_array *array_elem; 5579 5580 if (vlen == 0) 5581 goto error; 5582 5583 member = btf_type_member(t) + vlen - 1; 5584 mtype = btf_type_skip_modifiers(btf, member->type, 5585 NULL); 5586 if (!btf_type_is_array(mtype)) 5587 goto error; 5588 5589 array_elem = (struct btf_array *)(mtype + 1); 5590 if (array_elem->nelems != 0) 5591 goto error; 5592 5593 moff = __btf_member_bit_offset(t, member) / 8; 5594 if (off < moff) 5595 goto error; 5596 5597 /* Only allow structure for now, can be relaxed for 5598 * other types later. 
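 *
 * E.g. (an illustrative layout, not from any particular kernel) for
 *
 *	struct outer {
 *		int x;
 *		struct inner vla[];
 *	};
 *
 * an access past the end of 'struct outer' is folded back into a
 * single element: off = (off - moff) % sizeof(struct inner), and
 * the walk restarts from 'struct inner'.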
5599 */
5600 t = btf_type_skip_modifiers(btf, array_elem->type,
5601 NULL);
5602 if (!btf_type_is_struct(t))
5603 goto error;
5604
5605 off = (off - moff) % t->size;
5606 goto again;
5607
5608 error:
5609 bpf_log(log, "access beyond struct %s at off %u size %u\n",
5610 tname, off, size);
5611 return -EACCES;
5612 }
5613
5614 for_each_member(i, t, member) {
5615 /* offset of the field in bytes */
5616 moff = __btf_member_bit_offset(t, member) / 8;
5617 if (off + size <= moff)
5618 /* won't find anything, field is already too far */
5619 break;
5620
5621 if (__btf_member_bitfield_size(t, member)) {
5622 u32 end_bit = __btf_member_bit_offset(t, member) +
5623 __btf_member_bitfield_size(t, member);
5624
5625 /* off <= moff instead of off == moff because clang
5626 * does not generate a BTF member for anonymous
5627 * bitfield like the ":16" here:
5628 * struct {
5629 * int :16;
5630 * int x:8;
5631 * };
5632 */
5633 if (off <= moff &&
5634 BITS_ROUNDUP_BYTES(end_bit) <= off + size)
5635 return WALK_SCALAR;
5636
5637 /* off may be accessing a following member
5638 *
5639 * or
5640 *
5641 * Doing partial access at either end of this
5642 * bitfield. Continue on this case also to
5643 * treat it as not accessing this bitfield
5644 * and eventually error out as field not
5645 * found to keep it simple.
5646 * It could be relaxed if there was a legit
5647 * partial access case later.
5648 */
5649 continue;
5650 }
5651
5652 /* In case "off" is pointing to a hole in the struct */
5653 if (off < moff)
5654 break;
5655
5656 /* type of the field */
5657 mid = member->type;
5658 mtype = btf_type_by_id(btf, member->type);
5659 mname = __btf_name_by_offset(btf, member->name_off);
5660
5661 mtype = __btf_resolve_size(btf, mtype, &msize,
5662 &elem_type, &elem_id, &total_nelems,
5663 &mid);
5664 if (IS_ERR(mtype)) {
5665 bpf_log(log, "field %s doesn't have size\n", mname);
5666 return -EFAULT;
5667 }
5668
5669 mtrue_end = moff + msize;
5670 if (off >= mtrue_end)
5671 /* no overlap with member, keep iterating */
5672 continue;
5673
5674 if (btf_type_is_array(mtype)) {
5675 u32 elem_idx;
5676
5677 /* __btf_resolve_size() above helps to
5678 * linearize a multi-dimensional array.
5679 *
5680 * The logic here is to treat an array
5681 * in a struct in the following way:
5682 *
5683 * struct outer {
5684 * struct inner array[2][2];
5685 * };
5686 *
5687 * looks like:
5688 *
5689 * struct outer {
5690 * struct inner array_elem0;
5691 * struct inner array_elem1;
5692 * struct inner array_elem2;
5693 * struct inner array_elem3;
5694 * };
5695 *
5696 * When accessing outer->array[1][0], it moves
5697 * moff to "array_elem2", sets mtype to
5698 * "struct inner", and msize also becomes
5699 * sizeof(struct inner). Then most of the
5700 * remaining logic will fall through without
5701 * caring whether the current member is an array or
5702 * not.
5703 *
5704 * Unlike mtype/msize/moff, mtrue_end does not
5705 * change. The naming difference ("_true") tells
5706 * that it does not always correspond to
5707 * the current mtype/msize/moff.
5708 * It is the true end of the current
5709 * member (i.e. array in this case). That
5710 * will allow an int array to be accessed like
5711 * a scratch space,
5712 * i.e. allow access beyond the size of
5713 * the array's element as long as it is
5714 * within the mtrue_end boundary.
5715 */
5716
5717 /* skip empty array */
5718 if (moff == mtrue_end)
5719 continue;
5720
5721 msize /= total_nelems;
5722 elem_idx = (off - moff) / msize;
5723 moff += elem_idx * msize;
5724 mtype = elem_type;
5725 mid = elem_id;
5726 }
5727
5728 /* the 'off' we're looking for is either equal to the start
5729 * of this field or inside this struct
5730 */
5731 if (btf_type_is_struct(mtype)) {
5732 /* our field must be inside that union or struct */
5733 t = mtype;
5734
5735 /* return if the offset matches the member offset */
5736 if (off == moff) {
5737 *next_btf_id = mid;
5738 return WALK_STRUCT;
5739 }
5740
5741 /* adjust offset we're looking for */
5742 off -= moff;
5743 goto again;
5744 }
5745
5746 if (btf_type_is_ptr(mtype)) {
5747 const struct btf_type *stype, *t;
5748 enum bpf_type_flag tmp_flag = 0;
5749 u32 id;
5750
5751 if (msize != size || off != moff) {
5752 bpf_log(log,
5753 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
5754 mname, moff, tname, off, size);
5755 return -EACCES;
5756 }
5757
5758 /* check type tag */
5759 t = btf_type_by_id(btf, mtype->type);
5760 if (btf_type_is_type_tag(t)) {
5761 tag_value = __btf_name_by_offset(btf, t->name_off);
5762 /* check __user tag */
5763 if (strcmp(tag_value, "user") == 0)
5764 tmp_flag = MEM_USER;
5765 /* check __percpu tag */
5766 if (strcmp(tag_value, "percpu") == 0)
5767 tmp_flag = MEM_PERCPU;
5768 }
5769
5770 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
5771 if (btf_type_is_struct(stype)) {
5772 *next_btf_id = id;
5773 *flag = tmp_flag;
5774 return WALK_PTR;
5775 }
5776 }
5777
5778 /* Allow more flexible access within an int as long as
5779 * it is within mtrue_end.
5780 * Since mtrue_end could be the end of an array,
5781 * that also allows using an array of int as a scratch
5782 * space. e.g. skb->cb[].
5783 */
5784 if (off + size > mtrue_end) {
5785 bpf_log(log,
5786 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
5787 mname, mtrue_end, tname, off, size);
5788 return -EACCES;
5789 }
5790
5791 return WALK_SCALAR;
5792 }
5793 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
5794 return -EINVAL;
5795 }
5796
5797 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
5798 const struct btf_type *t, int off, int size,
5799 enum bpf_access_type atype __maybe_unused,
5800 u32 *next_btf_id, enum bpf_type_flag *flag)
5801 {
5802 enum bpf_type_flag tmp_flag = 0;
5803 int err;
5804 u32 id;
5805
5806 do {
5807 err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
5808
5809 switch (err) {
5810 case WALK_PTR:
5811 /* If we found the pointer or scalar on t+off,
5812 * we're done.
5813 */
5814 *next_btf_id = id;
5815 *flag = tmp_flag;
5816 return PTR_TO_BTF_ID;
5817 case WALK_SCALAR:
5818 return SCALAR_VALUE;
5819 case WALK_STRUCT:
5820 /* We found a nested struct, so continue the search
5821 * by diving into it. At this point the offset is
5822 * aligned with the new type, so set it to 0.
5823 */
5824 t = btf_type_by_id(btf, id);
5825 off = 0;
5826 break;
5827 default:
5828 /* It's either an error or an unknown return value;
5829 * scream and leave.
5830 */
5831 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
5832 return -EINVAL;
5833 return err;
5834 }
5835 } while (t);
5836
5837 return -EINVAL;
5838 }
5839
5840 /* Check that two BTF types, each specified as a BTF object + id, are exactly
5841 * the same.
Trivial ID check is not enough due to module BTFs, because we can 5842 * end up with two different module BTFs, but IDs point to the common type in 5843 * vmlinux BTF. 5844 */ 5845 static bool btf_types_are_same(const struct btf *btf1, u32 id1, 5846 const struct btf *btf2, u32 id2) 5847 { 5848 if (id1 != id2) 5849 return false; 5850 if (btf1 == btf2) 5851 return true; 5852 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); 5853 } 5854 5855 bool btf_struct_ids_match(struct bpf_verifier_log *log, 5856 const struct btf *btf, u32 id, int off, 5857 const struct btf *need_btf, u32 need_type_id, 5858 bool strict) 5859 { 5860 const struct btf_type *type; 5861 enum bpf_type_flag flag; 5862 int err; 5863 5864 /* Are we already done? */ 5865 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id)) 5866 return true; 5867 /* In case of strict type match, we do not walk struct, the top level 5868 * type match must succeed. When strict is true, off should have already 5869 * been 0. 5870 */ 5871 if (strict) 5872 return false; 5873 again: 5874 type = btf_type_by_id(btf, id); 5875 if (!type) 5876 return false; 5877 err = btf_struct_walk(log, btf, type, off, 1, &id, &flag); 5878 if (err != WALK_STRUCT) 5879 return false; 5880 5881 /* We found nested struct object. If it matches 5882 * the requested ID, we're done. Otherwise let's 5883 * continue the search with offset 0 in the new 5884 * type. 5885 */ 5886 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) { 5887 off = 0; 5888 goto again; 5889 } 5890 5891 return true; 5892 } 5893 5894 static int __get_type_size(struct btf *btf, u32 btf_id, 5895 const struct btf_type **ret_type) 5896 { 5897 const struct btf_type *t; 5898 5899 *ret_type = btf_type_by_id(btf, 0); 5900 if (!btf_id) 5901 /* void */ 5902 return 0; 5903 t = btf_type_by_id(btf, btf_id); 5904 while (t && btf_type_is_modifier(t)) 5905 t = btf_type_by_id(btf, t->type); 5906 if (!t) 5907 return -EINVAL; 5908 *ret_type = t; 5909 if (btf_type_is_ptr(t)) 5910 /* kernel size of pointer. Not BPF's size of pointer*/ 5911 return sizeof(void *); 5912 if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) 5913 return t->size; 5914 return -EINVAL; 5915 } 5916 5917 int btf_distill_func_proto(struct bpf_verifier_log *log, 5918 struct btf *btf, 5919 const struct btf_type *func, 5920 const char *tname, 5921 struct btf_func_model *m) 5922 { 5923 const struct btf_param *args; 5924 const struct btf_type *t; 5925 u32 i, nargs; 5926 int ret; 5927 5928 if (!func) { 5929 /* BTF function prototype doesn't match the verifier types. 5930 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. 5931 */ 5932 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { 5933 m->arg_size[i] = 8; 5934 m->arg_flags[i] = 0; 5935 } 5936 m->ret_size = 8; 5937 m->nr_args = MAX_BPF_FUNC_REG_ARGS; 5938 return 0; 5939 } 5940 args = (const struct btf_param *)(func + 1); 5941 nargs = btf_type_vlen(func); 5942 if (nargs > MAX_BPF_FUNC_ARGS) { 5943 bpf_log(log, 5944 "The function %s has %d arguments. 
Too many.\n", 5945 tname, nargs); 5946 return -EINVAL; 5947 } 5948 ret = __get_type_size(btf, func->type, &t); 5949 if (ret < 0 || __btf_type_is_struct(t)) { 5950 bpf_log(log, 5951 "The function %s return type %s is unsupported.\n", 5952 tname, btf_type_str(t)); 5953 return -EINVAL; 5954 } 5955 m->ret_size = ret; 5956 5957 for (i = 0; i < nargs; i++) { 5958 if (i == nargs - 1 && args[i].type == 0) { 5959 bpf_log(log, 5960 "The function %s with variable args is unsupported.\n", 5961 tname); 5962 return -EINVAL; 5963 } 5964 ret = __get_type_size(btf, args[i].type, &t); 5965 5966 /* No support of struct argument size greater than 16 bytes */ 5967 if (ret < 0 || ret > 16) { 5968 bpf_log(log, 5969 "The function %s arg%d type %s is unsupported.\n", 5970 tname, i, btf_type_str(t)); 5971 return -EINVAL; 5972 } 5973 if (ret == 0) { 5974 bpf_log(log, 5975 "The function %s has malformed void argument.\n", 5976 tname); 5977 return -EINVAL; 5978 } 5979 m->arg_size[i] = ret; 5980 m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0; 5981 } 5982 m->nr_args = nargs; 5983 return 0; 5984 } 5985 5986 /* Compare BTFs of two functions assuming only scalars and pointers to context. 5987 * t1 points to BTF_KIND_FUNC in btf1 5988 * t2 points to BTF_KIND_FUNC in btf2 5989 * Returns: 5990 * EINVAL - function prototype mismatch 5991 * EFAULT - verifier bug 5992 * 0 - 99% match. The last 1% is validated by the verifier. 5993 */ 5994 static int btf_check_func_type_match(struct bpf_verifier_log *log, 5995 struct btf *btf1, const struct btf_type *t1, 5996 struct btf *btf2, const struct btf_type *t2) 5997 { 5998 const struct btf_param *args1, *args2; 5999 const char *fn1, *fn2, *s1, *s2; 6000 u32 nargs1, nargs2, i; 6001 6002 fn1 = btf_name_by_offset(btf1, t1->name_off); 6003 fn2 = btf_name_by_offset(btf2, t2->name_off); 6004 6005 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) { 6006 bpf_log(log, "%s() is not a global function\n", fn1); 6007 return -EINVAL; 6008 } 6009 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) { 6010 bpf_log(log, "%s() is not a global function\n", fn2); 6011 return -EINVAL; 6012 } 6013 6014 t1 = btf_type_by_id(btf1, t1->type); 6015 if (!t1 || !btf_type_is_func_proto(t1)) 6016 return -EFAULT; 6017 t2 = btf_type_by_id(btf2, t2->type); 6018 if (!t2 || !btf_type_is_func_proto(t2)) 6019 return -EFAULT; 6020 6021 args1 = (const struct btf_param *)(t1 + 1); 6022 nargs1 = btf_type_vlen(t1); 6023 args2 = (const struct btf_param *)(t2 + 1); 6024 nargs2 = btf_type_vlen(t2); 6025 6026 if (nargs1 != nargs2) { 6027 bpf_log(log, "%s() has %d args while %s() has %d args\n", 6028 fn1, nargs1, fn2, nargs2); 6029 return -EINVAL; 6030 } 6031 6032 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 6033 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 6034 if (t1->info != t2->info) { 6035 bpf_log(log, 6036 "Return type %s of %s() doesn't match type %s of %s()\n", 6037 btf_type_str(t1), fn1, 6038 btf_type_str(t2), fn2); 6039 return -EINVAL; 6040 } 6041 6042 for (i = 0; i < nargs1; i++) { 6043 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL); 6044 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL); 6045 6046 if (t1->info != t2->info) { 6047 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n", 6048 i, fn1, btf_type_str(t1), 6049 fn2, btf_type_str(t2)); 6050 return -EINVAL; 6051 } 6052 if (btf_type_has_size(t1) && t1->size != t2->size) { 6053 bpf_log(log, 6054 "arg%d in %s() has size %d while %s() has %d\n", 6055 i, fn1, t1->size, 6056 fn2, t2->size); 6057 return -EINVAL; 6058 } 6059 6060 /* 
Global functions are validated with scalars and pointers 6061 * to context only, and only global functions can be replaced. 6062 * Hence, type-check only those types. 6063 */ 6064 if (btf_type_is_int(t1) || btf_is_any_enum(t1)) 6065 continue; 6066 if (!btf_type_is_ptr(t1)) { 6067 bpf_log(log, 6068 "arg%d in %s() has unrecognized type\n", 6069 i, fn1); 6070 return -EINVAL; 6071 } 6072 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); 6073 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); 6074 if (!btf_type_is_struct(t1)) { 6075 bpf_log(log, 6076 "arg%d in %s() is not a pointer to context\n", 6077 i, fn1); 6078 return -EINVAL; 6079 } 6080 if (!btf_type_is_struct(t2)) { 6081 bpf_log(log, 6082 "arg%d in %s() is not a pointer to context\n", 6083 i, fn2); 6084 return -EINVAL; 6085 } 6086 /* This is an optional check to make program writing easier. 6087 * Compare names of structs and report an error to the user. 6088 * btf_prepare_func_args() already checked that t2 struct 6089 * is a context type. btf_prepare_func_args() will check 6090 * later that t1 struct is a context type as well. 6091 */ 6092 s1 = btf_name_by_offset(btf1, t1->name_off); 6093 s2 = btf_name_by_offset(btf2, t2->name_off); 6094 if (strcmp(s1, s2)) { 6095 bpf_log(log, 6096 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", 6097 i, fn1, s1, fn2, s2); 6098 return -EINVAL; 6099 } 6100 } 6101 return 0; 6102 } 6103 6104 /* Compare the BTF of a given program with the BTF of the target program */ 6105 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, 6106 struct btf *btf2, const struct btf_type *t2) 6107 { 6108 struct btf *btf1 = prog->aux->btf; 6109 const struct btf_type *t1; 6110 u32 btf_id = 0; 6111 6112 if (!prog->aux->func_info) { 6113 bpf_log(log, "Program extension requires BTF\n"); 6114 return -EINVAL; 6115 } 6116 6117 btf_id = prog->aux->func_info[0].type_id; 6118 if (!btf_id) 6119 return -EFAULT; 6120 6121 t1 = btf_type_by_id(btf1, btf_id); 6122 if (!t1 || !btf_type_is_func(t1)) 6123 return -EFAULT; 6124 6125 return btf_check_func_type_match(log, btf1, t1, btf2, t2); 6126 } 6127 6128 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 6129 #ifdef CONFIG_NET 6130 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 6131 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 6132 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 6133 #endif 6134 }; 6135 6136 /* Returns true if the struct is composed (recursively) of scalars; up to 4 levels of nesting are allowed */ 6137 static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log, 6138 const struct btf *btf, 6139 const struct btf_type *t, int rec) 6140 { 6141 const struct btf_type *member_type; 6142 const struct btf_member *member; 6143 u32 i; 6144 6145 if (!btf_type_is_struct(t)) 6146 return false; 6147 6148 for_each_member(i, t, member) { 6149 const struct btf_array *array; 6150 6151 member_type = btf_type_skip_modifiers(btf, member->type, NULL); 6152 if (btf_type_is_struct(member_type)) { 6153 if (rec >= 3) { 6154 bpf_log(log, "max struct nesting depth exceeded\n"); 6155 return false; 6156 } 6157 if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1)) 6158 return false; 6159 continue; 6160 } 6161 if (btf_type_is_array(member_type)) { 6162 array = btf_type_array(member_type); 6163 if (!array->nelems) 6164 return false; 6165 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 6166 if (!btf_type_is_scalar(member_type)) 6167 return false; 6168 continue; 6169 } 6170 if (!btf_type_is_scalar(member_type)) 6171 return false; 6172 } 6173 
return true; 6174 } 6175 6176 static bool is_kfunc_arg_mem_size(const struct btf *btf, 6177 const struct btf_param *arg, 6178 const struct bpf_reg_state *reg) 6179 { 6180 int len, sfx_len = sizeof("__sz") - 1; 6181 const struct btf_type *t; 6182 const char *param_name; 6183 6184 t = btf_type_skip_modifiers(btf, arg->type, NULL); 6185 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 6186 return false; 6187 6188 /* In the future, this can be ported to use BTF tagging */ 6189 param_name = btf_name_by_offset(btf, arg->name_off); 6190 if (str_is_empty(param_name)) 6191 return false; 6192 len = strlen(param_name); 6193 if (len < sfx_len) 6194 return false; 6195 param_name += len - sfx_len; 6196 if (strncmp(param_name, "__sz", sfx_len)) 6197 return false; 6198 6199 return true; 6200 } 6201 6202 static bool btf_is_kfunc_arg_mem_size(const struct btf *btf, 6203 const struct btf_param *arg, 6204 const struct bpf_reg_state *reg, 6205 const char *name) 6206 { 6207 int len, target_len = strlen(name); 6208 const struct btf_type *t; 6209 const char *param_name; 6210 6211 t = btf_type_skip_modifiers(btf, arg->type, NULL); 6212 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 6213 return false; 6214 6215 param_name = btf_name_by_offset(btf, arg->name_off); 6216 if (str_is_empty(param_name)) 6217 return false; 6218 len = strlen(param_name); 6219 if (len != target_len) 6220 return false; 6221 if (strcmp(param_name, name)) 6222 return false; 6223 6224 return true; 6225 } 6226 6227 static int btf_check_func_arg_match(struct bpf_verifier_env *env, 6228 const struct btf *btf, u32 func_id, 6229 struct bpf_reg_state *regs, 6230 bool ptr_to_mem_ok, 6231 struct bpf_kfunc_arg_meta *kfunc_meta, 6232 bool processing_call) 6233 { 6234 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 6235 bool rel = false, kptr_get = false, trusted_args = false; 6236 bool sleepable = false; 6237 struct bpf_verifier_log *log = &env->log; 6238 u32 i, nargs, ref_id, ref_obj_id = 0; 6239 bool is_kfunc = btf_is_kernel(btf); 6240 const char *func_name, *ref_tname; 6241 const struct btf_type *t, *ref_t; 6242 const struct btf_param *args; 6243 int ref_regno = 0, ret; 6244 6245 t = btf_type_by_id(btf, func_id); 6246 if (!t || !btf_type_is_func(t)) { 6247 /* These checks were already done by the verifier while loading 6248 * struct bpf_func_info or in add_kfunc_call(). 6249 */ 6250 bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n", 6251 func_id); 6252 return -EFAULT; 6253 } 6254 func_name = btf_name_by_offset(btf, t->name_off); 6255 6256 t = btf_type_by_id(btf, t->type); 6257 if (!t || !btf_type_is_func_proto(t)) { 6258 bpf_log(log, "Invalid BTF of func %s\n", func_name); 6259 return -EFAULT; 6260 } 6261 args = (const struct btf_param *)(t + 1); 6262 nargs = btf_type_vlen(t); 6263 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 6264 bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs, 6265 MAX_BPF_FUNC_REG_ARGS); 6266 return -EINVAL; 6267 } 6268 6269 if (is_kfunc && kfunc_meta) { 6270 /* Only kfunc can be release func */ 6271 rel = kfunc_meta->flags & KF_RELEASE; 6272 kptr_get = kfunc_meta->flags & KF_KPTR_GET; 6273 trusted_args = kfunc_meta->flags & KF_TRUSTED_ARGS; 6274 sleepable = kfunc_meta->flags & KF_SLEEPABLE; 6275 } 6276 6277 /* check that BTF function arguments match actual types that the 6278 * verifier sees. 
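 *
 * A sketch of what "match" means here (the kfunc and its argument
 * names below are hypothetical, not taken from this file): for a
 * kfunc declared as
 *
 *	void kfunc_foo(struct task_struct *p, void *mem, int mem__sz);
 *
 * the loop below would require R1 to hold a PTR_TO_BTF_ID that
 * matches struct task_struct, and R2/R3 to form a valid mem/len
 * pair, where the "__sz" naming convention marks the size argument
 * (see is_kfunc_arg_mem_size()).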
6279 */ 6280 for (i = 0; i < nargs; i++) { 6281 enum bpf_arg_type arg_type = ARG_DONTCARE; 6282 u32 regno = i + 1; 6283 struct bpf_reg_state *reg = &regs[regno]; 6284 bool obj_ptr = false; 6285 6286 t = btf_type_skip_modifiers(btf, args[i].type, NULL); 6287 if (btf_type_is_scalar(t)) { 6288 if (is_kfunc && kfunc_meta) { 6289 bool is_buf_size = false; 6290 6291 /* check for any const scalar parameter of name "rdonly_buf_size" 6292 * or "rdwr_buf_size" 6293 */ 6294 if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg, 6295 "rdonly_buf_size")) { 6296 kfunc_meta->r0_rdonly = true; 6297 is_buf_size = true; 6298 } else if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg, 6299 "rdwr_buf_size")) 6300 is_buf_size = true; 6301 6302 if (is_buf_size) { 6303 if (kfunc_meta->r0_size) { 6304 bpf_log(log, "2 or more rdonly/rdwr_buf_size parameters for kfunc\n"); 6305 return -EINVAL; 6306 } 6307 6308 if (!tnum_is_const(reg->var_off)) { 6309 bpf_log(log, "R%d is not a const\n", regno); 6310 return -EINVAL; 6311 } 6312 6313 kfunc_meta->r0_size = reg->var_off.value; 6314 ret = mark_chain_precision(env, regno); 6315 if (ret) 6316 return ret; 6317 } 6318 } 6319 6320 if (reg->type == SCALAR_VALUE) 6321 continue; 6322 bpf_log(log, "R%d is not a scalar\n", regno); 6323 return -EINVAL; 6324 } 6325 6326 if (!btf_type_is_ptr(t)) { 6327 bpf_log(log, "Unrecognized arg#%d type %s\n", 6328 i, btf_type_str(t)); 6329 return -EINVAL; 6330 } 6331 6332 /* These register types have special constraints wrt ref_obj_id 6333 * and offset checks. The rest of trusted args don't. 6334 */ 6335 obj_ptr = reg->type == PTR_TO_CTX || reg->type == PTR_TO_BTF_ID || 6336 reg2btf_ids[base_type(reg->type)]; 6337 6338 /* Check if the argument must be a referenced pointer; args + i has 6339 * been verified to be a pointer (after skipping modifiers). 6340 * PTR_TO_CTX is ok without having non-zero ref_obj_id. 6341 */ 6342 if (is_kfunc && trusted_args && (obj_ptr && reg->type != PTR_TO_CTX) && !reg->ref_obj_id) { 6343 bpf_log(log, "R%d must be referenced\n", regno); 6344 return -EINVAL; 6345 } 6346 6347 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); 6348 ref_tname = btf_name_by_offset(btf, ref_t->name_off); 6349 6350 /* Trusted args have the same offset checks as release arguments */ 6351 if ((trusted_args && obj_ptr) || (rel && reg->ref_obj_id)) 6352 arg_type |= OBJ_RELEASE; 6353 ret = check_func_arg_reg_off(env, reg, regno, arg_type); 6354 if (ret < 0) 6355 return ret; 6356 6357 if (is_kfunc && reg->ref_obj_id) { 6358 /* Ensure only one argument is a referenced PTR_TO_BTF_ID */ 6359 if (ref_obj_id) { 6360 bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 6361 regno, reg->ref_obj_id, ref_obj_id); 6362 return -EFAULT; 6363 } 6364 ref_regno = regno; 6365 ref_obj_id = reg->ref_obj_id; 6366 } 6367 6368 /* kptr_get is only true for kfunc */ 6369 if (i == 0 && kptr_get) { 6370 struct bpf_map_value_off_desc *off_desc; 6371 6372 if (reg->type != PTR_TO_MAP_VALUE) { 6373 bpf_log(log, "arg#0 expected pointer to map value\n"); 6374 return -EINVAL; 6375 } 6376 6377 /* check_func_arg_reg_off allows var_off for 6378 * PTR_TO_MAP_VALUE, but we need a fixed offset to find 6379 * off_desc. 
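 *
 * For example (a sketch; the map value layout is hypothetical):
 *
 *	struct map_val {
 *		int counter;
 *		struct foo __kptr_ref *ptr;	(at some fixed byte offset)
 *	};
 *
 * arg#0 must then be a PTR_TO_MAP_VALUE whose constant offset points
 * exactly at "ptr", so that bpf_map_kptr_off_contains() can look up
 * the matching off_desc for that field.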
6380 */ 6381 if (!tnum_is_const(reg->var_off)) { 6382 bpf_log(log, "arg#0 must have constant offset\n"); 6383 return -EINVAL; 6384 } 6385 6386 off_desc = bpf_map_kptr_off_contains(reg->map_ptr, reg->off + reg->var_off.value); 6387 if (!off_desc || off_desc->type != BPF_KPTR_REF) { 6388 bpf_log(log, "arg#0 no referenced kptr at map value offset=%llu\n", 6389 reg->off + reg->var_off.value); 6390 return -EINVAL; 6391 } 6392 6393 if (!btf_type_is_ptr(ref_t)) { 6394 bpf_log(log, "arg#0 BTF type must be a double pointer\n"); 6395 return -EINVAL; 6396 } 6397 6398 ref_t = btf_type_skip_modifiers(btf, ref_t->type, &ref_id); 6399 ref_tname = btf_name_by_offset(btf, ref_t->name_off); 6400 6401 if (!btf_type_is_struct(ref_t)) { 6402 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n", 6403 func_name, i, btf_type_str(ref_t), ref_tname); 6404 return -EINVAL; 6405 } 6406 if (!btf_struct_ids_match(log, btf, ref_id, 0, off_desc->kptr.btf, 6407 off_desc->kptr.btf_id, true)) { 6408 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s\n", 6409 func_name, i, btf_type_str(ref_t), ref_tname); 6410 return -EINVAL; 6411 } 6412 /* rest of the arguments can be anything, like normal kfunc */ 6413 } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { 6414 /* If function expects ctx type in BTF check that caller 6415 * is passing PTR_TO_CTX. 6416 */ 6417 if (reg->type != PTR_TO_CTX) { 6418 bpf_log(log, 6419 "arg#%d expected pointer to ctx, but got %s\n", 6420 i, btf_type_str(t)); 6421 return -EINVAL; 6422 } 6423 } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || 6424 (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) { 6425 const struct btf_type *reg_ref_t; 6426 const struct btf *reg_btf; 6427 const char *reg_ref_tname; 6428 u32 reg_ref_id; 6429 6430 if (!btf_type_is_struct(ref_t)) { 6431 bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n", 6432 func_name, i, btf_type_str(ref_t), 6433 ref_tname); 6434 return -EINVAL; 6435 } 6436 6437 if (reg->type == PTR_TO_BTF_ID) { 6438 reg_btf = reg->btf; 6439 reg_ref_id = reg->btf_id; 6440 } else { 6441 reg_btf = btf_vmlinux; 6442 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; 6443 } 6444 6445 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, 6446 &reg_ref_id); 6447 reg_ref_tname = btf_name_by_offset(reg_btf, 6448 reg_ref_t->name_off); 6449 if (!btf_struct_ids_match(log, reg_btf, reg_ref_id, 6450 reg->off, btf, ref_id, 6451 trusted_args || (rel && reg->ref_obj_id))) { 6452 bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n", 6453 func_name, i, 6454 btf_type_str(ref_t), ref_tname, 6455 regno, btf_type_str(reg_ref_t), 6456 reg_ref_tname); 6457 return -EINVAL; 6458 } 6459 } else if (ptr_to_mem_ok && processing_call) { 6460 const struct btf_type *resolve_ret; 6461 u32 type_size; 6462 6463 if (is_kfunc) { 6464 bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]); 6465 bool arg_dynptr = btf_type_is_struct(ref_t) && 6466 !strcmp(ref_tname, 6467 stringify_struct(bpf_dynptr_kern)); 6468 6469 /* Permit pointer to mem, but only when argument 6470 * type is pointer to scalar, or struct composed 6471 * (recursively) of scalars. 6472 * When arg_mem_size is true, the pointer can be 6473 * void *. 6474 * Also permit initialized local dynamic pointers. 6475 */ 6476 if (!btf_type_is_scalar(ref_t) && 6477 !__btf_type_is_scalar_struct(log, btf, ref_t, 0) && 6478 !arg_dynptr && 6479 (arg_mem_size ? 
!btf_type_is_void(ref_t) : 1)) { 6480 bpf_log(log, 6481 "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n", 6482 i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : ""); 6483 return -EINVAL; 6484 } 6485 6486 if (arg_dynptr) { 6487 if (reg->type != PTR_TO_STACK) { 6488 bpf_log(log, "arg#%d pointer type %s %s not to stack\n", 6489 i, btf_type_str(ref_t), 6490 ref_tname); 6491 return -EINVAL; 6492 } 6493 6494 if (!is_dynptr_reg_valid_init(env, reg)) { 6495 bpf_log(log, 6496 "arg#%d pointer type %s %s must be valid and initialized\n", 6497 i, btf_type_str(ref_t), 6498 ref_tname); 6499 return -EINVAL; 6500 } 6501 6502 if (!is_dynptr_type_expected(env, reg, 6503 ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL)) { 6504 bpf_log(log, 6505 "arg#%d pointer type %s %s points to unsupported dynamic pointer type\n", 6506 i, btf_type_str(ref_t), 6507 ref_tname); 6508 return -EINVAL; 6509 } 6510 6511 continue; 6512 } 6513 6514 /* Check for mem, len pair */ 6515 if (arg_mem_size) { 6516 if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) { 6517 bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", 6518 i, i + 1); 6519 return -EINVAL; 6520 } 6521 i++; 6522 continue; 6523 } 6524 } 6525 6526 resolve_ret = btf_resolve_size(btf, ref_t, &type_size); 6527 if (IS_ERR(resolve_ret)) { 6528 bpf_log(log, 6529 "arg#%d reference type('%s %s') size cannot be determined: %ld\n", 6530 i, btf_type_str(ref_t), ref_tname, 6531 PTR_ERR(resolve_ret)); 6532 return -EINVAL; 6533 } 6534 6535 if (check_mem_reg(env, reg, regno, type_size)) 6536 return -EINVAL; 6537 } else { 6538 bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i, 6539 is_kfunc ? "kernel " : "", func_name, func_id); 6540 return -EINVAL; 6541 } 6542 } 6543 6544 /* Either both are set, or neither */ 6545 WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno)); 6546 /* We already made sure ref_obj_id is set only for one argument. We do 6547 * allow (!rel && ref_obj_id), so that passing such referenced 6548 * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when 6549 * is_kfunc is true. 6550 */ 6551 if (rel && !ref_obj_id) { 6552 bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", 6553 func_name); 6554 return -EINVAL; 6555 } 6556 6557 if (sleepable && !env->prog->aux->sleepable) { 6558 bpf_log(log, "kernel function %s is sleepable but the program is not\n", 6559 func_name); 6560 return -EINVAL; 6561 } 6562 6563 if (kfunc_meta && ref_obj_id) 6564 kfunc_meta->ref_obj_id = ref_obj_id; 6565 6566 /* returns argument register number > 0 in case of reference release kfunc */ 6567 return rel ? ref_regno : 0; 6568 } 6569 6570 /* Compare BTF of a function declaration with given bpf_reg_state. 6571 * Returns: 6572 * EFAULT - there is a verifier bug. Abort verification. 6573 * EINVAL - there is a type mismatch or BTF is not available. 6574 * 0 - BTF matches with what bpf_reg_state expects. 6575 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. 
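 *
 * E.g. (a sketch; the subprogram and prog type are illustrative,
 * not taken from this file): for a global function
 *
 *	int subprog(struct __sk_buff *skb, int n);
 *
 * in a TC program, the check passes only if R1 is PTR_TO_CTX and
 * R2 is a SCALAR_VALUE at the call site.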
6576 */ 6577 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, 6578 struct bpf_reg_state *regs) 6579 { 6580 struct bpf_prog *prog = env->prog; 6581 struct btf *btf = prog->aux->btf; 6582 bool is_global; 6583 u32 btf_id; 6584 int err; 6585 6586 if (!prog->aux->func_info) 6587 return -EINVAL; 6588 6589 btf_id = prog->aux->func_info[subprog].type_id; 6590 if (!btf_id) 6591 return -EFAULT; 6592 6593 if (prog->aux->func_info_aux[subprog].unreliable) 6594 return -EINVAL; 6595 6596 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 6597 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, false); 6598 6599 /* Compiler optimizations can remove arguments from static functions 6600 * or a mismatched type can be passed into a global function. 6601 * In such cases, mark the function as unreliable from the BTF point of view. 6602 */ 6603 if (err) 6604 prog->aux->func_info_aux[subprog].unreliable = true; 6605 return err; 6606 } 6607 6608 /* Compare BTF of a function call with given bpf_reg_state. 6609 * Returns: 6610 * EFAULT - there is a verifier bug. Abort verification. 6611 * EINVAL - there is a type mismatch or BTF is not available. 6612 * 0 - BTF matches with what bpf_reg_state expects. 6613 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. 6614 * 6615 * NOTE: the code is duplicated from btf_check_subprog_arg_match() 6616 * because btf_check_func_arg_match() is still doing both. Once that 6617 * function is split in two, we can call btf_check_subprog_arg_match() 6618 * from here first, and then handle the calling part in a new code path. 6619 */ 6620 int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, 6621 struct bpf_reg_state *regs) 6622 { 6623 struct bpf_prog *prog = env->prog; 6624 struct btf *btf = prog->aux->btf; 6625 bool is_global; 6626 u32 btf_id; 6627 int err; 6628 6629 if (!prog->aux->func_info) 6630 return -EINVAL; 6631 6632 btf_id = prog->aux->func_info[subprog].type_id; 6633 if (!btf_id) 6634 return -EFAULT; 6635 6636 if (prog->aux->func_info_aux[subprog].unreliable) 6637 return -EINVAL; 6638 6639 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 6640 err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, true); 6641 6642 /* Compiler optimizations can remove arguments from static functions 6643 * or a mismatched type can be passed into a global function. 6644 * In such cases, mark the function as unreliable from the BTF point of view. 6645 */ 6646 if (err) 6647 prog->aux->func_info_aux[subprog].unreliable = true; 6648 return err; 6649 } 6650 6651 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 6652 const struct btf *btf, u32 func_id, 6653 struct bpf_reg_state *regs, 6654 struct bpf_kfunc_arg_meta *meta) 6655 { 6656 return btf_check_func_arg_match(env, btf, func_id, regs, true, meta, true); 6657 } 6658 6659 /* Convert BTF of a function into bpf_reg_state if possible 6660 * Returns: 6661 * EFAULT - there is a verifier bug. Abort verification. 6662 * EINVAL - cannot convert BTF. 6663 * 0 - Successfully converted BTF into bpf_reg_state 6664 * (PTR_TO_CTX, SCALAR_VALUE or PTR_TO_MEM). 
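 *
 * For example (a sketch; the function is hypothetical):
 *
 *	int func(struct xdp_md *ctx, int n);
 *
 * would be converted to regs[1].type = PTR_TO_CTX and
 * regs[2].type = SCALAR_VALUE for an XDP program.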
6665 */ 6666 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 6667 struct bpf_reg_state *regs) 6668 { 6669 struct bpf_verifier_log *log = &env->log; 6670 struct bpf_prog *prog = env->prog; 6671 enum bpf_prog_type prog_type = prog->type; 6672 struct btf *btf = prog->aux->btf; 6673 const struct btf_param *args; 6674 const struct btf_type *t, *ref_t; 6675 u32 i, nargs, btf_id; 6676 const char *tname; 6677 6678 if (!prog->aux->func_info || 6679 prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) { 6680 bpf_log(log, "Verifier bug\n"); 6681 return -EFAULT; 6682 } 6683 6684 btf_id = prog->aux->func_info[subprog].type_id; 6685 if (!btf_id) { 6686 bpf_log(log, "Global functions need valid BTF\n"); 6687 return -EFAULT; 6688 } 6689 6690 t = btf_type_by_id(btf, btf_id); 6691 if (!t || !btf_type_is_func(t)) { 6692 /* These checks were already done by the verifier while loading 6693 * struct bpf_func_info 6694 */ 6695 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n", 6696 subprog); 6697 return -EFAULT; 6698 } 6699 tname = btf_name_by_offset(btf, t->name_off); 6700 6701 if (log->level & BPF_LOG_LEVEL) 6702 bpf_log(log, "Validating %s() func#%d...\n", 6703 tname, subprog); 6704 6705 if (prog->aux->func_info_aux[subprog].unreliable) { 6706 bpf_log(log, "Verifier bug in function %s()\n", tname); 6707 return -EFAULT; 6708 } 6709 if (prog_type == BPF_PROG_TYPE_EXT) 6710 prog_type = prog->aux->dst_prog->type; 6711 6712 t = btf_type_by_id(btf, t->type); 6713 if (!t || !btf_type_is_func_proto(t)) { 6714 bpf_log(log, "Invalid type of function %s()\n", tname); 6715 return -EFAULT; 6716 } 6717 args = (const struct btf_param *)(t + 1); 6718 nargs = btf_type_vlen(t); 6719 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 6720 bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n", 6721 tname, nargs, MAX_BPF_FUNC_REG_ARGS); 6722 return -EINVAL; 6723 } 6724 /* check that the function returns a scalar (int or enum) */ 6725 t = btf_type_by_id(btf, t->type); 6726 while (btf_type_is_modifier(t)) 6727 t = btf_type_by_id(btf, t->type); 6728 if (!btf_type_is_int(t) && !btf_is_any_enum(t)) { 6729 bpf_log(log, 6730 "Global function %s() doesn't return scalar. Only those are supported.\n", 6731 tname); 6732 return -EINVAL; 6733 } 6734 /* Convert BTF function arguments into verifier types. 6735 * Only PTR_TO_CTX, SCALAR and PTR_TO_MEM are supported atm. 
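 *
 * A sketch of the pointer-to-mem case below (the type is
 * hypothetical): an argument declared as "struct foo *f", where
 * struct foo is not a ctx type, becomes
 * PTR_TO_MEM | PTR_MAYBE_NULL with reg->mem_size set to
 * sizeof(struct foo), as computed by btf_resolve_size().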
6736 */ 6737 for (i = 0; i < nargs; i++) { 6738 struct bpf_reg_state *reg = &regs[i + 1]; 6739 6740 t = btf_type_by_id(btf, args[i].type); 6741 while (btf_type_is_modifier(t)) 6742 t = btf_type_by_id(btf, t->type); 6743 if (btf_type_is_int(t) || btf_is_any_enum(t)) { 6744 reg->type = SCALAR_VALUE; 6745 continue; 6746 } 6747 if (btf_type_is_ptr(t)) { 6748 if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { 6749 reg->type = PTR_TO_CTX; 6750 continue; 6751 } 6752 6753 t = btf_type_skip_modifiers(btf, t->type, NULL); 6754 6755 ref_t = btf_resolve_size(btf, t, &reg->mem_size); 6756 if (IS_ERR(ref_t)) { 6757 bpf_log(log, 6758 "arg#%d reference type('%s %s') size cannot be determined: %ld\n", 6759 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off), 6760 PTR_ERR(ref_t)); 6761 return -EINVAL; 6762 } 6763 6764 reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; 6765 reg->id = ++env->id_gen; 6766 6767 continue; 6768 } 6769 bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n", 6770 i, btf_type_str(t), tname); 6771 return -EINVAL; 6772 } 6773 return 0; 6774 } 6775 6776 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, 6777 struct btf_show *show) 6778 { 6779 const struct btf_type *t = btf_type_by_id(btf, type_id); 6780 6781 show->btf = btf; 6782 memset(&show->state, 0, sizeof(show->state)); 6783 memset(&show->obj, 0, sizeof(show->obj)); 6784 6785 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); 6786 } 6787 6788 static void btf_seq_show(struct btf_show *show, const char *fmt, 6789 va_list args) 6790 { 6791 seq_vprintf((struct seq_file *)show->target, fmt, args); 6792 } 6793 6794 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, 6795 void *obj, struct seq_file *m, u64 flags) 6796 { 6797 struct btf_show sseq; 6798 6799 sseq.target = m; 6800 sseq.showfn = btf_seq_show; 6801 sseq.flags = flags; 6802 6803 btf_type_show(btf, type_id, obj, &sseq); 6804 6805 return sseq.state.status; 6806 } 6807 6808 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, 6809 struct seq_file *m) 6810 { 6811 (void) btf_type_seq_show_flags(btf, type_id, obj, m, 6812 BTF_SHOW_NONAME | BTF_SHOW_COMPACT | 6813 BTF_SHOW_ZERO | BTF_SHOW_UNSAFE); 6814 } 6815 6816 struct btf_show_snprintf { 6817 struct btf_show show; 6818 int len_left; /* space left in string */ 6819 int len; /* length we would have written */ 6820 }; 6821 6822 static void btf_snprintf_show(struct btf_show *show, const char *fmt, 6823 va_list args) 6824 { 6825 struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; 6826 int len; 6827 6828 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args); 6829 6830 if (len < 0) { 6831 ssnprintf->len_left = 0; 6832 ssnprintf->len = len; 6833 } else if (len >= ssnprintf->len_left) { 6834 /* no space, drive on to get length we would have written */ 6835 ssnprintf->len_left = 0; 6836 ssnprintf->len += len; 6837 } else { 6838 ssnprintf->len_left -= len; 6839 ssnprintf->len += len; 6840 show->target += len; 6841 } 6842 } 6843 6844 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj, 6845 char *buf, int len, u64 flags) 6846 { 6847 struct btf_show_snprintf ssnprintf; 6848 6849 ssnprintf.show.target = buf; 6850 ssnprintf.show.flags = flags; 6851 ssnprintf.show.showfn = btf_snprintf_show; 6852 ssnprintf.len_left = len; 6853 ssnprintf.len = 0; 6854 6855 btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf); 6856 6857 /* If we encountered an error, return it. 
*/ 6858 if (ssnprintf.show.state.status) 6859 return ssnprintf.show.state.status; 6860 6861 /* Otherwise return length we would have written */ 6862 return ssnprintf.len; 6863 } 6864 6865 #ifdef CONFIG_PROC_FS 6866 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) 6867 { 6868 const struct btf *btf = filp->private_data; 6869 6870 seq_printf(m, "btf_id:\t%u\n", btf->id); 6871 } 6872 #endif 6873 6874 static int btf_release(struct inode *inode, struct file *filp) 6875 { 6876 btf_put(filp->private_data); 6877 return 0; 6878 } 6879 6880 const struct file_operations btf_fops = { 6881 #ifdef CONFIG_PROC_FS 6882 .show_fdinfo = bpf_btf_show_fdinfo, 6883 #endif 6884 .release = btf_release, 6885 }; 6886 6887 static int __btf_new_fd(struct btf *btf) 6888 { 6889 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); 6890 } 6891 6892 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr) 6893 { 6894 struct btf *btf; 6895 int ret; 6896 6897 btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel), 6898 attr->btf_size, attr->btf_log_level, 6899 u64_to_user_ptr(attr->btf_log_buf), 6900 attr->btf_log_size); 6901 if (IS_ERR(btf)) 6902 return PTR_ERR(btf); 6903 6904 ret = btf_alloc_id(btf); 6905 if (ret) { 6906 btf_free(btf); 6907 return ret; 6908 } 6909 6910 /* 6911 * The BTF ID is published to the userspace. 6912 * All BTF free must go through call_rcu() from 6913 * now on (i.e. free by calling btf_put()). 6914 */ 6915 6916 ret = __btf_new_fd(btf); 6917 if (ret < 0) 6918 btf_put(btf); 6919 6920 return ret; 6921 } 6922 6923 struct btf *btf_get_by_fd(int fd) 6924 { 6925 struct btf *btf; 6926 struct fd f; 6927 6928 f = fdget(fd); 6929 6930 if (!f.file) 6931 return ERR_PTR(-EBADF); 6932 6933 if (f.file->f_op != &btf_fops) { 6934 fdput(f); 6935 return ERR_PTR(-EINVAL); 6936 } 6937 6938 btf = f.file->private_data; 6939 refcount_inc(&btf->refcnt); 6940 fdput(f); 6941 6942 return btf; 6943 } 6944 6945 int btf_get_info_by_fd(const struct btf *btf, 6946 const union bpf_attr *attr, 6947 union bpf_attr __user *uattr) 6948 { 6949 struct bpf_btf_info __user *uinfo; 6950 struct bpf_btf_info info; 6951 u32 info_copy, btf_copy; 6952 void __user *ubtf; 6953 char __user *uname; 6954 u32 uinfo_len, uname_len, name_len; 6955 int ret = 0; 6956 6957 uinfo = u64_to_user_ptr(attr->info.info); 6958 uinfo_len = attr->info.info_len; 6959 6960 info_copy = min_t(u32, uinfo_len, sizeof(info)); 6961 memset(&info, 0, sizeof(info)); 6962 if (copy_from_user(&info, uinfo, info_copy)) 6963 return -EFAULT; 6964 6965 info.id = btf->id; 6966 ubtf = u64_to_user_ptr(info.btf); 6967 btf_copy = min_t(u32, btf->data_size, info.btf_size); 6968 if (copy_to_user(ubtf, btf->data, btf_copy)) 6969 return -EFAULT; 6970 info.btf_size = btf->data_size; 6971 6972 info.kernel_btf = btf->kernel_btf; 6973 6974 uname = u64_to_user_ptr(info.name); 6975 uname_len = info.name_len; 6976 if (!uname ^ !uname_len) 6977 return -EINVAL; 6978 6979 name_len = strlen(btf->name); 6980 info.name_len = name_len; 6981 6982 if (uname) { 6983 if (uname_len >= name_len + 1) { 6984 if (copy_to_user(uname, btf->name, name_len + 1)) 6985 return -EFAULT; 6986 } else { 6987 char zero = '\0'; 6988 6989 if (copy_to_user(uname, btf->name, uname_len - 1)) 6990 return -EFAULT; 6991 if (put_user(zero, uname + uname_len - 1)) 6992 return -EFAULT; 6993 /* let user-space know about too short buffer */ 6994 ret = -ENOSPC; 6995 } 6996 } 6997 6998 if (copy_to_user(uinfo, &info, info_copy) || 6999 put_user(info_copy, &uattr->info.info_len)) 7000 return -EFAULT; 
7001 7002 return ret; 7003 } 7004 7005 int btf_get_fd_by_id(u32 id) 7006 { 7007 struct btf *btf; 7008 int fd; 7009 7010 rcu_read_lock(); 7011 btf = idr_find(&btf_idr, id); 7012 if (!btf || !refcount_inc_not_zero(&btf->refcnt)) 7013 btf = ERR_PTR(-ENOENT); 7014 rcu_read_unlock(); 7015 7016 if (IS_ERR(btf)) 7017 return PTR_ERR(btf); 7018 7019 fd = __btf_new_fd(btf); 7020 if (fd < 0) 7021 btf_put(btf); 7022 7023 return fd; 7024 } 7025 7026 u32 btf_obj_id(const struct btf *btf) 7027 { 7028 return btf->id; 7029 } 7030 7031 bool btf_is_kernel(const struct btf *btf) 7032 { 7033 return btf->kernel_btf; 7034 } 7035 7036 bool btf_is_module(const struct btf *btf) 7037 { 7038 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0; 7039 } 7040 7041 static int btf_id_cmp_func(const void *a, const void *b) 7042 { 7043 const int *pa = a, *pb = b; 7044 7045 return *pa - *pb; 7046 } 7047 7048 bool btf_id_set_contains(const struct btf_id_set *set, u32 id) 7049 { 7050 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL; 7051 } 7052 7053 static void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id) 7054 { 7055 return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func); 7056 } 7057 7058 enum { 7059 BTF_MODULE_F_LIVE = (1 << 0), 7060 }; 7061 7062 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7063 struct btf_module { 7064 struct list_head list; 7065 struct module *module; 7066 struct btf *btf; 7067 struct bin_attribute *sysfs_attr; 7068 int flags; 7069 }; 7070 7071 static LIST_HEAD(btf_modules); 7072 static DEFINE_MUTEX(btf_module_mutex); 7073 7074 static ssize_t 7075 btf_module_read(struct file *file, struct kobject *kobj, 7076 struct bin_attribute *bin_attr, 7077 char *buf, loff_t off, size_t len) 7078 { 7079 const struct btf *btf = bin_attr->private; 7080 7081 memcpy(buf, btf->data + off, len); 7082 return len; 7083 } 7084 7085 static void purge_cand_cache(struct btf *btf); 7086 7087 static int btf_module_notify(struct notifier_block *nb, unsigned long op, 7088 void *module) 7089 { 7090 struct btf_module *btf_mod, *tmp; 7091 struct module *mod = module; 7092 struct btf *btf; 7093 int err = 0; 7094 7095 if (mod->btf_data_size == 0 || 7096 (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE && 7097 op != MODULE_STATE_GOING)) 7098 goto out; 7099 7100 switch (op) { 7101 case MODULE_STATE_COMING: 7102 btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); 7103 if (!btf_mod) { 7104 err = -ENOMEM; 7105 goto out; 7106 } 7107 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); 7108 if (IS_ERR(btf)) { 7109 pr_warn("failed to validate module [%s] BTF: %ld\n", 7110 mod->name, PTR_ERR(btf)); 7111 kfree(btf_mod); 7112 if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) 7113 err = PTR_ERR(btf); 7114 goto out; 7115 } 7116 err = btf_alloc_id(btf); 7117 if (err) { 7118 btf_free(btf); 7119 kfree(btf_mod); 7120 goto out; 7121 } 7122 7123 purge_cand_cache(NULL); 7124 mutex_lock(&btf_module_mutex); 7125 btf_mod->module = module; 7126 btf_mod->btf = btf; 7127 list_add(&btf_mod->list, &btf_modules); 7128 mutex_unlock(&btf_module_mutex); 7129 7130 if (IS_ENABLED(CONFIG_SYSFS)) { 7131 struct bin_attribute *attr; 7132 7133 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 7134 if (!attr) 7135 goto out; 7136 7137 sysfs_bin_attr_init(attr); 7138 attr->attr.name = btf->name; 7139 attr->attr.mode = 0444; 7140 attr->size = btf->data_size; 7141 attr->private = btf; 7142 attr->read = btf_module_read; 7143 7144 err = sysfs_create_bin_file(btf_kobj, attr); 7145 if (err) { 7146 
pr_warn("failed to register module [%s] BTF in sysfs: %d\n", 7147 mod->name, err); 7148 kfree(attr); 7149 err = 0; 7150 goto out; 7151 } 7152 7153 btf_mod->sysfs_attr = attr; 7154 } 7155 7156 break; 7157 case MODULE_STATE_LIVE: 7158 mutex_lock(&btf_module_mutex); 7159 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 7160 if (btf_mod->module != module) 7161 continue; 7162 7163 btf_mod->flags |= BTF_MODULE_F_LIVE; 7164 break; 7165 } 7166 mutex_unlock(&btf_module_mutex); 7167 break; 7168 case MODULE_STATE_GOING: 7169 mutex_lock(&btf_module_mutex); 7170 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 7171 if (btf_mod->module != module) 7172 continue; 7173 7174 list_del(&btf_mod->list); 7175 if (btf_mod->sysfs_attr) 7176 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr); 7177 purge_cand_cache(btf_mod->btf); 7178 btf_put(btf_mod->btf); 7179 kfree(btf_mod->sysfs_attr); 7180 kfree(btf_mod); 7181 break; 7182 } 7183 mutex_unlock(&btf_module_mutex); 7184 break; 7185 } 7186 out: 7187 return notifier_from_errno(err); 7188 } 7189 7190 static struct notifier_block btf_module_nb = { 7191 .notifier_call = btf_module_notify, 7192 }; 7193 7194 static int __init btf_module_init(void) 7195 { 7196 register_module_notifier(&btf_module_nb); 7197 return 0; 7198 } 7199 7200 fs_initcall(btf_module_init); 7201 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ 7202 7203 struct module *btf_try_get_module(const struct btf *btf) 7204 { 7205 struct module *res = NULL; 7206 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7207 struct btf_module *btf_mod, *tmp; 7208 7209 mutex_lock(&btf_module_mutex); 7210 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 7211 if (btf_mod->btf != btf) 7212 continue; 7213 7214 /* We must only consider module whose __init routine has 7215 * finished, hence we must check for BTF_MODULE_F_LIVE flag, 7216 * which is set from the notifier callback for 7217 * MODULE_STATE_LIVE. 7218 */ 7219 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module)) 7220 res = btf_mod->module; 7221 7222 break; 7223 } 7224 mutex_unlock(&btf_module_mutex); 7225 #endif 7226 7227 return res; 7228 } 7229 7230 /* Returns struct btf corresponding to the struct module. 7231 * This function can return NULL or ERR_PTR. 
7232 */ 7233 static struct btf *btf_get_module_btf(const struct module *module) 7234 { 7235 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7236 struct btf_module *btf_mod, *tmp; 7237 #endif 7238 struct btf *btf = NULL; 7239 7240 if (!module) { 7241 btf = bpf_get_btf_vmlinux(); 7242 if (!IS_ERR_OR_NULL(btf)) 7243 btf_get(btf); 7244 return btf; 7245 } 7246 7247 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7248 mutex_lock(&btf_module_mutex); 7249 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { 7250 if (btf_mod->module != module) 7251 continue; 7252 7253 btf_get(btf_mod->btf); 7254 btf = btf_mod->btf; 7255 break; 7256 } 7257 mutex_unlock(&btf_module_mutex); 7258 #endif 7259 7260 return btf; 7261 } 7262 7263 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) 7264 { 7265 struct btf *btf = NULL; 7266 int btf_obj_fd = 0; 7267 long ret; 7268 7269 if (flags) 7270 return -EINVAL; 7271 7272 if (name_sz <= 1 || name[name_sz - 1]) 7273 return -EINVAL; 7274 7275 ret = bpf_find_btf_id(name, kind, &btf); 7276 if (ret > 0 && btf_is_module(btf)) { 7277 btf_obj_fd = __btf_new_fd(btf); 7278 if (btf_obj_fd < 0) { 7279 btf_put(btf); 7280 return btf_obj_fd; 7281 } 7282 return ret | (((u64)btf_obj_fd) << 32); 7283 } 7284 if (ret > 0) 7285 btf_put(btf); 7286 return ret; 7287 } 7288 7289 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { 7290 .func = bpf_btf_find_by_name_kind, 7291 .gpl_only = false, 7292 .ret_type = RET_INTEGER, 7293 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 7294 .arg2_type = ARG_CONST_SIZE, 7295 .arg3_type = ARG_ANYTHING, 7296 .arg4_type = ARG_ANYTHING, 7297 }; 7298 7299 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) 7300 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type) 7301 BTF_TRACING_TYPE_xxx 7302 #undef BTF_TRACING_TYPE 7303 7304 /* Kernel Function (kfunc) BTF ID set registration API */ 7305 7306 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7307 struct btf_id_set8 *add_set) 7308 { 7309 bool vmlinux_set = !btf_is_module(btf); 7310 struct btf_kfunc_set_tab *tab; 7311 struct btf_id_set8 *set; 7312 u32 set_cnt; 7313 int ret; 7314 7315 if (hook >= BTF_KFUNC_HOOK_MAX) { 7316 ret = -EINVAL; 7317 goto end; 7318 } 7319 7320 if (!add_set->cnt) 7321 return 0; 7322 7323 tab = btf->kfunc_set_tab; 7324 if (!tab) { 7325 tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN); 7326 if (!tab) 7327 return -ENOMEM; 7328 btf->kfunc_set_tab = tab; 7329 } 7330 7331 set = tab->sets[hook]; 7332 /* Warn when register_btf_kfunc_id_set is called twice for the same hook 7333 * for module sets. 7334 */ 7335 if (WARN_ON_ONCE(set && !vmlinux_set)) { 7336 ret = -EINVAL; 7337 goto end; 7338 } 7339 7340 /* We don't need to allocate, concatenate, and sort module sets, because 7341 * only one is allowed per hook. Hence, we can directly assign the 7342 * pointer and return. 7343 */ 7344 if (!vmlinux_set) { 7345 tab->sets[hook] = add_set; 7346 return 0; 7347 } 7348 7349 /* In case of vmlinux sets, there may be more than one set being 7350 * registered per hook. To create a unified set, we allocate a new set 7351 * and concatenate all individual sets being registered. While each set 7352 * is individually sorted, they may become unsorted when concatenated, 7353 * hence re-sorting the final set again is required to make binary 7354 * searching the set using btf_id_set8_contains function work. 7355 */ 7356 set_cnt = set ? 
set->cnt : 0; 7357 7358 if (set_cnt > U32_MAX - add_set->cnt) { 7359 ret = -EOVERFLOW; 7360 goto end; 7361 } 7362 7363 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { 7364 ret = -E2BIG; 7365 goto end; 7366 } 7367 7368 /* Grow set */ 7369 set = krealloc(tab->sets[hook], 7370 offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]), 7371 GFP_KERNEL | __GFP_NOWARN); 7372 if (!set) { 7373 ret = -ENOMEM; 7374 goto end; 7375 } 7376 7377 /* For newly allocated set, initialize set->cnt to 0 */ 7378 if (!tab->sets[hook]) 7379 set->cnt = 0; 7380 tab->sets[hook] = set; 7381 7382 /* Concatenate the two sets */ 7383 memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); 7384 set->cnt += add_set->cnt; 7385 7386 sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL); 7387 7388 return 0; 7389 end: 7390 btf_free_kfunc_set_tab(btf); 7391 return ret; 7392 } 7393 7394 static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, 7395 enum btf_kfunc_hook hook, 7396 u32 kfunc_btf_id) 7397 { 7398 struct btf_id_set8 *set; 7399 u32 *id; 7400 7401 if (hook >= BTF_KFUNC_HOOK_MAX) 7402 return NULL; 7403 if (!btf->kfunc_set_tab) 7404 return NULL; 7405 set = btf->kfunc_set_tab->sets[hook]; 7406 if (!set) 7407 return NULL; 7408 id = btf_id_set8_contains(set, kfunc_btf_id); 7409 if (!id) 7410 return NULL; 7411 /* The flags for BTF ID are located next to it */ 7412 return id + 1; 7413 } 7414 7415 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) 7416 { 7417 switch (prog_type) { 7418 case BPF_PROG_TYPE_XDP: 7419 return BTF_KFUNC_HOOK_XDP; 7420 case BPF_PROG_TYPE_SCHED_CLS: 7421 return BTF_KFUNC_HOOK_TC; 7422 case BPF_PROG_TYPE_STRUCT_OPS: 7423 return BTF_KFUNC_HOOK_STRUCT_OPS; 7424 case BPF_PROG_TYPE_TRACING: 7425 case BPF_PROG_TYPE_LSM: 7426 return BTF_KFUNC_HOOK_TRACING; 7427 case BPF_PROG_TYPE_SYSCALL: 7428 return BTF_KFUNC_HOOK_SYSCALL; 7429 default: 7430 return BTF_KFUNC_HOOK_MAX; 7431 } 7432 } 7433 7434 /* Caution: 7435 * Reference to the module (obtained using btf_try_get_module) corresponding to 7436 * the struct btf *MUST* be held when calling this function from verifier 7437 * context. This is usually true as we stash references in prog's kfunc_btf_tab; 7438 * keeping the reference for the duration of the call provides the necessary 7439 * protection for looking up a well-formed btf->kfunc_set_tab. 
7440 */ 7441 u32 *btf_kfunc_id_set_contains(const struct btf *btf, 7442 enum bpf_prog_type prog_type, 7443 u32 kfunc_btf_id) 7444 { 7445 enum btf_kfunc_hook hook; 7446 7447 hook = bpf_prog_type_to_kfunc_hook(prog_type); 7448 return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id); 7449 } 7450 7451 /* This function must be invoked only from initcalls/module init functions */ 7452 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, 7453 const struct btf_kfunc_id_set *kset) 7454 { 7455 enum btf_kfunc_hook hook; 7456 struct btf *btf; 7457 int ret; 7458 7459 btf = btf_get_module_btf(kset->owner); 7460 if (!btf) { 7461 if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 7462 pr_err("missing vmlinux BTF, cannot register kfuncs\n"); 7463 return -ENOENT; 7464 } 7465 if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { 7466 pr_err("missing module BTF, cannot register kfuncs\n"); 7467 return -ENOENT; 7468 } 7469 return 0; 7470 } 7471 if (IS_ERR(btf)) 7472 return PTR_ERR(btf); 7473 7474 hook = bpf_prog_type_to_kfunc_hook(prog_type); 7475 ret = btf_populate_kfunc_set(btf, hook, kset->set); 7476 btf_put(btf); 7477 return ret; 7478 } 7479 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); 7480 7481 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id) 7482 { 7483 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; 7484 struct btf_id_dtor_kfunc *dtor; 7485 7486 if (!tab) 7487 return -ENOENT; 7488 /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need 7489 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func. 7490 */ 7491 BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0); 7492 dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func); 7493 if (!dtor) 7494 return -ENOENT; 7495 return dtor->kfunc_btf_id; 7496 } 7497 7498 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt) 7499 { 7500 const struct btf_type *dtor_func, *dtor_func_proto, *t; 7501 const struct btf_param *args; 7502 s32 dtor_btf_id; 7503 u32 nr_args, i; 7504 7505 for (i = 0; i < cnt; i++) { 7506 dtor_btf_id = dtors[i].kfunc_btf_id; 7507 7508 dtor_func = btf_type_by_id(btf, dtor_btf_id); 7509 if (!dtor_func || !btf_type_is_func(dtor_func)) 7510 return -EINVAL; 7511 7512 dtor_func_proto = btf_type_by_id(btf, dtor_func->type); 7513 if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto)) 7514 return -EINVAL; 7515 7516 /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */ 7517 t = btf_type_by_id(btf, dtor_func_proto->type); 7518 if (!t || !btf_type_is_void(t)) 7519 return -EINVAL; 7520 7521 nr_args = btf_type_vlen(dtor_func_proto); 7522 if (nr_args != 1) 7523 return -EINVAL; 7524 args = btf_params(dtor_func_proto); 7525 t = btf_type_by_id(btf, args[0].type); 7526 /* Allow any pointer type, as the width on targets Linux supports 7527 * will be the same for all pointer types (i.e. 
sizeof(void *)) 7528 */ 7529 if (!t || !btf_type_is_ptr(t)) 7530 return -EINVAL; 7531 } 7532 return 0; 7533 } 7534 7535 /* This function must be invoked only from initcalls/module init functions */ 7536 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, 7537 struct module *owner) 7538 { 7539 struct btf_id_dtor_kfunc_tab *tab; 7540 struct btf *btf; 7541 u32 tab_cnt; 7542 int ret; 7543 7544 btf = btf_get_module_btf(owner); 7545 if (!btf) { 7546 if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 7547 pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n"); 7548 return -ENOENT; 7549 } 7550 if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { 7551 pr_err("missing module BTF, cannot register dtor kfuncs\n"); 7552 return -ENOENT; 7553 } 7554 return 0; 7555 } 7556 if (IS_ERR(btf)) 7557 return PTR_ERR(btf); 7558 7559 if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { 7560 pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT); 7561 ret = -E2BIG; 7562 goto end; 7563 } 7564 7565 /* Ensure that the prototype of dtor kfuncs being registered is sane */ 7566 ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt); 7567 if (ret < 0) 7568 goto end; 7569 7570 tab = btf->dtor_kfunc_tab; 7571 /* Only one call allowed for modules */ 7572 if (WARN_ON_ONCE(tab && btf_is_module(btf))) { 7573 ret = -EINVAL; 7574 goto end; 7575 } 7576 7577 tab_cnt = tab ? tab->cnt : 0; 7578 if (tab_cnt > U32_MAX - add_cnt) { 7579 ret = -EOVERFLOW; 7580 goto end; 7581 } 7582 if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { 7583 pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT); 7584 ret = -E2BIG; 7585 goto end; 7586 } 7587 7588 tab = krealloc(btf->dtor_kfunc_tab, 7589 offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]), 7590 GFP_KERNEL | __GFP_NOWARN); 7591 if (!tab) { 7592 ret = -ENOMEM; 7593 goto end; 7594 } 7595 7596 if (!btf->dtor_kfunc_tab) 7597 tab->cnt = 0; 7598 btf->dtor_kfunc_tab = tab; 7599 7600 memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); 7601 tab->cnt += add_cnt; 7602 7603 sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL); 7604 7605 return 0; 7606 end: 7607 btf_free_dtor_kfunc_tab(btf); 7608 btf_put(btf); 7609 return ret; 7610 } 7611 EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs); 7612 7613 #define MAX_TYPES_ARE_COMPAT_DEPTH 2 7614 7615 /* Check local and target types for compatibility. This check is used for 7616 * type-based CO-RE relocations and follows slightly different rules than 7617 * field-based relocations. This function assumes that root types were already 7618 * checked for name match. Beyond that initial root-level name check, names 7619 * are completely ignored. Compatibility rules are as follows (see the 7619 * examples after the list): 7620 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but 7621 * kind should match for local and target types (i.e., STRUCT is not 7622 * compatible with UNION); 7623 * - for ENUMs/ENUM64s, the size is ignored; 7624 * - for INT, size and signedness are ignored; 7625 * - for ARRAY, dimensionality is ignored, element types are checked for 7626 * compatibility recursively; 7627 * - CONST/VOLATILE/RESTRICT modifiers are ignored; 7628 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible; 7629 * - FUNC_PROTOs are compatible if they have compatible signatures: same 7630 * number of input args and compatible return and argument types. 
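 *
 * For example, under these rules (illustrative cases, not taken from
 * the code below): "int[3]" and "int[5]" are compatible (ARRAY
 * dimensionality is ignored), "u32" and "s64" are compatible (INT
 * size and signedness are ignored), but a STRUCT is never compatible
 * with a UNION, even if their members are identical.
 *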
7631 * These rules are not set in stone and probably will be adjusted as we get 7632 * more experience with using BPF CO-RE relocations. 7633 */ 7634 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, 7635 const struct btf *targ_btf, __u32 targ_id) 7636 { 7637 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 7638 MAX_TYPES_ARE_COMPAT_DEPTH); 7639 } 7640 7641 #define MAX_TYPES_MATCH_DEPTH 2 7642 7643 int bpf_core_types_match(const struct btf *local_btf, u32 local_id, 7644 const struct btf *targ_btf, u32 targ_id) 7645 { 7646 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 7647 MAX_TYPES_MATCH_DEPTH); 7648 } 7649 7650 static bool bpf_core_is_flavor_sep(const char *s) 7651 { 7652 /* check X___Y name pattern, where X and Y are not underscores */ 7653 return s[0] != '_' && /* X */ 7654 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ 7655 s[4] != '_'; /* Y */ 7656 } 7657 7658 size_t bpf_core_essential_name_len(const char *name) 7659 { 7660 size_t n = strlen(name); 7661 int i; 7662 7663 for (i = n - 5; i >= 0; i--) { 7664 if (bpf_core_is_flavor_sep(name + i)) 7665 return i + 1; 7666 } 7667 return n; 7668 } 7669 7670 struct bpf_cand_cache { 7671 const char *name; 7672 u32 name_len; 7673 u16 kind; 7674 u16 cnt; 7675 struct { 7676 const struct btf *btf; 7677 u32 id; 7678 } cands[]; 7679 }; 7680 7681 static void bpf_free_cands(struct bpf_cand_cache *cands) 7682 { 7683 if (!cands->cnt) 7684 /* empty candidate array was allocated on stack */ 7685 return; 7686 kfree(cands); 7687 } 7688 7689 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) 7690 { 7691 kfree(cands->name); 7692 kfree(cands); 7693 } 7694 7695 #define VMLINUX_CAND_CACHE_SIZE 31 7696 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; 7697 7698 #define MODULE_CAND_CACHE_SIZE 31 7699 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; 7700 7701 static DEFINE_MUTEX(cand_cache_mutex); 7702 7703 static void __print_cand_cache(struct bpf_verifier_log *log, 7704 struct bpf_cand_cache **cache, 7705 int cache_size) 7706 { 7707 struct bpf_cand_cache *cc; 7708 int i, j; 7709 7710 for (i = 0; i < cache_size; i++) { 7711 cc = cache[i]; 7712 if (!cc) 7713 continue; 7714 bpf_log(log, "[%d]%s(", i, cc->name); 7715 for (j = 0; j < cc->cnt; j++) { 7716 bpf_log(log, "%d", cc->cands[j].id); 7717 if (j < cc->cnt - 1) 7718 bpf_log(log, " "); 7719 } 7720 bpf_log(log, "), "); 7721 } 7722 } 7723 7724 static void print_cand_cache(struct bpf_verifier_log *log) 7725 { 7726 mutex_lock(&cand_cache_mutex); 7727 bpf_log(log, "vmlinux_cand_cache:"); 7728 __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); 7729 bpf_log(log, "\nmodule_cand_cache:"); 7730 __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7731 bpf_log(log, "\n"); 7732 mutex_unlock(&cand_cache_mutex); 7733 } 7734 7735 static u32 hash_cands(struct bpf_cand_cache *cands) 7736 { 7737 return jhash(cands->name, cands->name_len, 0); 7738 } 7739 7740 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, 7741 struct bpf_cand_cache **cache, 7742 int cache_size) 7743 { 7744 struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; 7745 7746 if (cc && cc->name_len == cands->name_len && 7747 !strncmp(cc->name, cands->name, cands->name_len)) 7748 return cc; 7749 return NULL; 7750 } 7751 7752 static size_t sizeof_cands(int cnt) 7753 { 7754 return offsetof(struct bpf_cand_cache, cands[cnt]); 7755 } 7756 7757 static 
struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, 7758 struct bpf_cand_cache **cache, 7759 int cache_size) 7760 { 7761 struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; 7762 7763 if (*cc) { 7764 bpf_free_cands_from_cache(*cc); 7765 *cc = NULL; 7766 } 7767 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL); 7768 if (!new_cands) { 7769 bpf_free_cands(cands); 7770 return ERR_PTR(-ENOMEM); 7771 } 7772 /* strdup the name, since it will stay in the cache. 7773 * The cands->name points to strings in the prog's BTF and the prog can be unloaded. 7774 */ 7775 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL); 7776 bpf_free_cands(cands); 7777 if (!new_cands->name) { 7778 kfree(new_cands); 7779 return ERR_PTR(-ENOMEM); 7780 } 7781 *cc = new_cands; 7782 return new_cands; 7783 } 7784 7785 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 7786 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache, 7787 int cache_size) 7788 { 7789 struct bpf_cand_cache *cc; 7790 int i, j; 7791 7792 for (i = 0; i < cache_size; i++) { 7793 cc = cache[i]; 7794 if (!cc) 7795 continue; 7796 if (!btf) { 7797 /* when a new module is loaded, purge all of module_cand_cache, 7798 * since the new module might have candidates with a name 7799 * that matches cached cands. 7800 */ 7801 bpf_free_cands_from_cache(cc); 7802 cache[i] = NULL; 7803 continue; 7804 } 7805 /* when a module is unloaded, purge cache entries 7806 * that match the module's btf 7807 */ 7808 for (j = 0; j < cc->cnt; j++) 7809 if (cc->cands[j].btf == btf) { 7810 bpf_free_cands_from_cache(cc); 7811 cache[i] = NULL; 7812 break; 7813 } 7814 } 7815 7816 } 7817 7818 static void purge_cand_cache(struct btf *btf) 7819 { 7820 mutex_lock(&cand_cache_mutex); 7821 __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7822 mutex_unlock(&cand_cache_mutex); 7823 } 7824 #endif 7825 7826 static struct bpf_cand_cache * 7827 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf, 7828 int targ_start_id) 7829 { 7830 struct bpf_cand_cache *new_cands; 7831 const struct btf_type *t; 7832 const char *targ_name; 7833 size_t targ_essent_len; 7834 int n, i; 7835 7836 n = btf_nr_types(targ_btf); 7837 for (i = targ_start_id; i < n; i++) { 7838 t = btf_type_by_id(targ_btf, i); 7839 if (btf_kind(t) != cands->kind) 7840 continue; 7841 7842 targ_name = btf_name_by_offset(targ_btf, t->name_off); 7843 if (!targ_name) 7844 continue; 7845 7846 /* the resched point is before the strncmp to make sure that a search 7847 * for a non-existing name will have a chance to schedule(). 
7848 */ 7849 cond_resched(); 7850 7851 if (strncmp(cands->name, targ_name, cands->name_len) != 0) 7852 continue; 7853 7854 targ_essent_len = bpf_core_essential_name_len(targ_name); 7855 if (targ_essent_len != cands->name_len) 7856 continue; 7857 7858 /* most of the time there is only one candidate for a given kind+name pair */ 7859 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL); 7860 if (!new_cands) { 7861 bpf_free_cands(cands); 7862 return ERR_PTR(-ENOMEM); 7863 } 7864 7865 memcpy(new_cands, cands, sizeof_cands(cands->cnt)); 7866 bpf_free_cands(cands); 7867 cands = new_cands; 7868 cands->cands[cands->cnt].btf = targ_btf; 7869 cands->cands[cands->cnt].id = i; 7870 cands->cnt++; 7871 } 7872 return cands; 7873 } 7874 7875 static struct bpf_cand_cache * 7876 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id) 7877 { 7878 struct bpf_cand_cache *cands, *cc, local_cand = {}; 7879 const struct btf *local_btf = ctx->btf; 7880 const struct btf_type *local_type; 7881 const struct btf *main_btf; 7882 size_t local_essent_len; 7883 struct btf *mod_btf; 7884 const char *name; 7885 int id; 7886 7887 main_btf = bpf_get_btf_vmlinux(); 7888 if (IS_ERR(main_btf)) 7889 return ERR_CAST(main_btf); 7890 if (!main_btf) 7891 return ERR_PTR(-EINVAL); 7892 7893 local_type = btf_type_by_id(local_btf, local_type_id); 7894 if (!local_type) 7895 return ERR_PTR(-EINVAL); 7896 7897 name = btf_name_by_offset(local_btf, local_type->name_off); 7898 if (str_is_empty(name)) 7899 return ERR_PTR(-EINVAL); 7900 local_essent_len = bpf_core_essential_name_len(name); 7901 7902 cands = &local_cand; 7903 cands->name = name; 7904 cands->kind = btf_kind(local_type); 7905 cands->name_len = local_essent_len; 7906 7907 cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); 7908 /* cands is a pointer to stack here */ 7909 if (cc) { 7910 if (cc->cnt) 7911 return cc; 7912 goto check_modules; 7913 } 7914 7915 /* Attempt to find target candidates in vmlinux BTF first */ 7916 cands = bpf_core_add_cands(cands, main_btf, 1); 7917 if (IS_ERR(cands)) 7918 return ERR_CAST(cands); 7919 7920 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */ 7921 7922 /* populate cache even when cands->cnt == 0 */ 7923 cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); 7924 if (IS_ERR(cc)) 7925 return ERR_CAST(cc); 7926 7927 /* if vmlinux BTF has any candidate, don't go for module BTFs */ 7928 if (cc->cnt) 7929 return cc; 7930 7931 check_modules: 7932 /* cands is a pointer to stack here and cands->cnt == 0 */ 7933 cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7934 if (cc) 7935 /* if cache has it return it even if cc->cnt == 0 */ 7936 return cc; 7937 7938 /* If the candidate is not found in vmlinux's BTF then search in module BTFs */ 7939 spin_lock_bh(&btf_idr_lock); 7940 idr_for_each_entry(&btf_idr, mod_btf, id) { 7941 if (!btf_is_module(mod_btf)) 7942 continue; 7943 /* a linear search could be slow, hence unlock/lock 7944 * the IDR to avoid holding it for too long 7945 */ 7946 btf_get(mod_btf); 7947 spin_unlock_bh(&btf_idr_lock); 7948 cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf)); 7949 if (IS_ERR(cands)) { 7950 btf_put(mod_btf); 7951 return ERR_CAST(cands); 7952 } 7953 spin_lock_bh(&btf_idr_lock); 7954 btf_put(mod_btf); 7955 } 7956 spin_unlock_bh(&btf_idr_lock); 7957 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 7958 * or a pointer to the stack if cands->cnt == 0. 
7959 * Copy it into the cache even when cands->cnt == 0 and 7960 * return the result. 7961 */ 7962 return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE); 7963 } 7964 7965 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, 7966 int relo_idx, void *insn) 7967 { 7968 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; 7969 struct bpf_core_cand_list cands = {}; 7970 struct bpf_core_relo_res targ_res; 7971 struct bpf_core_spec *specs; 7972 int err; 7973 7974 /* ~4k of temp memory is necessary to convert an LLVM spec like "0:1:0:5" 7975 * into arrays of btf_ids of struct fields and array indices 7976 * (a worked example follows this function). 7976 */ 7977 specs = kcalloc(3, sizeof(*specs), GFP_KERNEL); 7978 if (!specs) 7979 return -ENOMEM; 7980 7981 if (need_cands) { 7982 struct bpf_cand_cache *cc; 7983 int i; 7984 7985 mutex_lock(&cand_cache_mutex); 7986 cc = bpf_core_find_cands(ctx, relo->type_id); 7987 if (IS_ERR(cc)) { 7988 bpf_log(ctx->log, "target candidate search failed for %d\n", 7989 relo->type_id); 7990 err = PTR_ERR(cc); 7991 goto out; 7992 } 7993 if (cc->cnt) { 7994 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL); 7995 if (!cands.cands) { 7996 err = -ENOMEM; 7997 goto out; 7998 } 7999 } 8000 for (i = 0; i < cc->cnt; i++) { 8001 bpf_log(ctx->log, 8002 "CO-RE relocating %s %s: found target candidate [%d]\n", 8003 btf_kind_str[cc->kind], cc->name, cc->cands[i].id); 8004 cands.cands[i].btf = cc->cands[i].btf; 8005 cands.cands[i].id = cc->cands[i].id; 8006 } 8007 cands.len = cc->cnt; 8008 /* cand_cache_mutex needs to span the cache lookup and 8009 * copy of btf pointer into bpf_core_cand_list, 8010 * since a module can be unloaded while bpf_core_calc_relo_insn() 8011 * is working with the module's btf. 8012 */ 8013 } 8014 8015 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs, 8016 &targ_res); 8017 if (err) 8018 goto out; 8019 8020 err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx, 8021 &targ_res); 8022 8023 out: 8024 kfree(specs); 8025 if (need_cands) { 8026 kfree(cands.cands); 8027 mutex_unlock(&cand_cache_mutex); 8028 if (ctx->log->level & BPF_LOG_LEVEL2) 8029 print_cand_cache(ctx->log); 8030 } 8031 return err; 8032 }
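
/* A worked example of the "0:1:0:5" access spec mentioned in
 * bpf_core_apply() above (the struct is illustrative; the same
 * example is used in libbpf's relo_core documentation):
 *
 *	struct sample {
 *		int a;
 *		struct {
 *			int b[10];
 *		};
 *	};
 *
 * For an access like s->b[5] through a struct sample *s, the spec
 * "0:1:0:5" reads as:
 *	0 - dereference of the initial pointer (array index 0),
 *	1 - field #1 of struct sample (the anonymous struct),
 *	0 - field #0 of the anonymous struct (the array b),
 *	5 - array index 5 within b.
 */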