// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <net/xdp.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

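/* Illustrative note: each BPF_PROG_TYPE() entry in <linux/bpf_types.h> expands
 * to one slot of the table above. For example, an XDP entry of the form
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp, struct xdp_md, struct xdp_buff)
 *
 * expands to
 *
 *	[BPF_PROG_TYPE_XDP] = &xdp_verifier_ops,
 *
 * so later code can pick up the per-program-type callbacks via
 * bpf_verifier_ops[prog->type]. (The exact argument list is whatever
 * bpf_types.h declares; it is shown here only to illustrate the expansion.)
 */
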
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
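
/* To make the reference-tracking rules above concrete, a sketch of a BPF-C
 * fragment the verifier accepts (helper names follow the UAPI; the surrounding
 * program, map and tuple setup are assumed):
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *	if (!sk)		// NULL branch: reg becomes scalar 0, reference is released
 *		return TC_ACT_OK;
 *	// here sk is PTR_TO_SOCKET and carries the acquired ref_obj_id
 *	bpf_sk_release(sk);	// matching release; omitting it rejects the program
 *	return TC_ACT_OK;
 */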

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg);
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
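
/* A sketch of how map_ptr_state is meant to be read back (illustrative only;
 * record_func_map() later in this file is the real producer):
 *
 *	bpf_map_ptr_store(aux, map, false);
 *	BPF_MAP_PTR(aux->map_ptr_state);	// recovers 'map', low unpriv bit masked off
 *	bpf_map_ptr_unpriv(aux);		// sticky: true once any unpriv path stored it
 *
 * BPF_MAP_PTR_POISON is a sentinel "no single usable map pointer" value, so
 * bpf_map_ptr_poisoned() lets later fixup passes skip map-specific rewrites.
 */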

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}

static bool bpf_helper_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == 0;
}

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

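/* Purely illustrative and not referenced anywhere: the three CALL encodings
 * that the predicates above distinguish, written out with BPF_RAW_INSN().
 * The imm values are placeholders.
 */
static const struct bpf_insn bpf_call_forms_example[] __maybe_unused = {
	/* helper call: src_reg == 0, imm selects a BPF_FUNC_* helper */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* bpf-to-bpf call: src_reg == BPF_PSEUDO_CALL, imm is the insn offset of the callee */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 1),
	/* kfunc call: src_reg == BPF_PSEUDO_KFUNC_CALL, imm is the kfunc's BTF type id */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
};
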
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	u8 release_regno;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int dynptr_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
	struct btf_field *kptr_field;
};

struct bpf_kfunc_call_arg_meta {
	/* In parameters */
	struct btf *btf;
	u32 func_id;
	u32 kfunc_flags;
	const struct btf_type *func_proto;
	const char *func_name;
	/* Out parameters */
	u32 ref_obj_id;
	u8 release_regno;
	bool r0_rdonly;
	u32 ret_btf_id;
	u64 r0_size;
	u32 subprogno;
	struct {
		u64 value;
		bool found;
	} arg_constant;

	/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
	 * generally to pass info about user-defined local kptr types to later
	 * verification logic
	 *   bpf_obj_drop
	 *     Record the local kptr type to be drop'd
	 *   bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
	 *     Record the local kptr type to be refcount_incr'd and use
	 *     arg_owning_ref to determine whether refcount_acquire should be
	 *     fallible
	 */
	struct btf *arg_btf;
	u32 arg_btf_id;
	bool arg_owning_ref;

	struct {
		struct btf_field *field;
	} arg_list_head;
	struct {
		struct btf_field *field;
	} arg_rbtree_root;
	struct {
		enum bpf_dynptr_type type;
		u32 id;
		u32 ref_obj_id;
	} initialized_dynptr;
	struct {
		u8 spi;
		u8 frameno;
	} iter;
	u64 mem_size;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static bool reg_not_null(const struct bpf_reg_state *reg)
{
	enum bpf_reg_type type;

	type = reg->type;
	if (type_may_be_null(type))
		return false;

	type = base_type(type);
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_MAP_VALUE ||
	       type == PTR_TO_MAP_KEY ||
	       type == PTR_TO_SOCK_COMMON ||
	       (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
	       type == PTR_TO_MEM;
}

static bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{
	struct btf_record *rec = NULL;
	struct btf_struct_meta *meta;

	if (reg->type == PTR_TO_MAP_VALUE) {
		rec = reg->map_ptr->record;
	} else if (type_is_ptr_alloc_obj(reg->type)) {
		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
		if (meta)
			rec = meta->record;
	}
	return rec;
}

static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{
	struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;

	return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
}

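/* Worked example of how the flag helpers above compose (values illustrative):
 *
 *	u32 t = PTR_TO_SOCKET | PTR_MAYBE_NULL;
 *
 *	base_type(t)		== PTR_TO_SOCKET	// modifier flags stripped
 *	type_may_be_null(t)	== true			// PTR_MAYBE_NULL still set
 *	reg_not_null(reg)	== false		// until a NULL check clears the flag
 *
 * Once the program tests the pointer against NULL, mark_ptr_not_null_reg()
 * (defined further down) drops PTR_MAYBE_NULL in the non-NULL branch and the
 * same register then satisfies reg_not_null().
 */
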
static bool type_is_rdonly_mem(u32 type) 504 { 505 return type & MEM_RDONLY; 506 } 507 508 static bool is_acquire_function(enum bpf_func_id func_id, 509 const struct bpf_map *map) 510 { 511 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; 512 513 if (func_id == BPF_FUNC_sk_lookup_tcp || 514 func_id == BPF_FUNC_sk_lookup_udp || 515 func_id == BPF_FUNC_skc_lookup_tcp || 516 func_id == BPF_FUNC_ringbuf_reserve || 517 func_id == BPF_FUNC_kptr_xchg) 518 return true; 519 520 if (func_id == BPF_FUNC_map_lookup_elem && 521 (map_type == BPF_MAP_TYPE_SOCKMAP || 522 map_type == BPF_MAP_TYPE_SOCKHASH)) 523 return true; 524 525 return false; 526 } 527 528 static bool is_ptr_cast_function(enum bpf_func_id func_id) 529 { 530 return func_id == BPF_FUNC_tcp_sock || 531 func_id == BPF_FUNC_sk_fullsock || 532 func_id == BPF_FUNC_skc_to_tcp_sock || 533 func_id == BPF_FUNC_skc_to_tcp6_sock || 534 func_id == BPF_FUNC_skc_to_udp6_sock || 535 func_id == BPF_FUNC_skc_to_mptcp_sock || 536 func_id == BPF_FUNC_skc_to_tcp_timewait_sock || 537 func_id == BPF_FUNC_skc_to_tcp_request_sock; 538 } 539 540 static bool is_dynptr_ref_function(enum bpf_func_id func_id) 541 { 542 return func_id == BPF_FUNC_dynptr_data; 543 } 544 545 static bool is_callback_calling_kfunc(u32 btf_id); 546 547 static bool is_callback_calling_function(enum bpf_func_id func_id) 548 { 549 return func_id == BPF_FUNC_for_each_map_elem || 550 func_id == BPF_FUNC_timer_set_callback || 551 func_id == BPF_FUNC_find_vma || 552 func_id == BPF_FUNC_loop || 553 func_id == BPF_FUNC_user_ringbuf_drain; 554 } 555 556 static bool is_async_callback_calling_function(enum bpf_func_id func_id) 557 { 558 return func_id == BPF_FUNC_timer_set_callback; 559 } 560 561 static bool is_storage_get_function(enum bpf_func_id func_id) 562 { 563 return func_id == BPF_FUNC_sk_storage_get || 564 func_id == BPF_FUNC_inode_storage_get || 565 func_id == BPF_FUNC_task_storage_get || 566 func_id == BPF_FUNC_cgrp_storage_get; 567 } 568 569 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id, 570 const struct bpf_map *map) 571 { 572 int ref_obj_uses = 0; 573 574 if (is_ptr_cast_function(func_id)) 575 ref_obj_uses++; 576 if (is_acquire_function(func_id, map)) 577 ref_obj_uses++; 578 if (is_dynptr_ref_function(func_id)) 579 ref_obj_uses++; 580 581 return ref_obj_uses > 1; 582 } 583 584 static bool is_cmpxchg_insn(const struct bpf_insn *insn) 585 { 586 return BPF_CLASS(insn->code) == BPF_STX && 587 BPF_MODE(insn->code) == BPF_ATOMIC && 588 insn->imm == BPF_CMPXCHG; 589 } 590 591 /* string representation of 'enum bpf_reg_type' 592 * 593 * Note that reg_type_str() can not appear more than once in a single verbose() 594 * statement. 
595 */ 596 static const char *reg_type_str(struct bpf_verifier_env *env, 597 enum bpf_reg_type type) 598 { 599 char postfix[16] = {0}, prefix[64] = {0}; 600 static const char * const str[] = { 601 [NOT_INIT] = "?", 602 [SCALAR_VALUE] = "scalar", 603 [PTR_TO_CTX] = "ctx", 604 [CONST_PTR_TO_MAP] = "map_ptr", 605 [PTR_TO_MAP_VALUE] = "map_value", 606 [PTR_TO_STACK] = "fp", 607 [PTR_TO_PACKET] = "pkt", 608 [PTR_TO_PACKET_META] = "pkt_meta", 609 [PTR_TO_PACKET_END] = "pkt_end", 610 [PTR_TO_FLOW_KEYS] = "flow_keys", 611 [PTR_TO_SOCKET] = "sock", 612 [PTR_TO_SOCK_COMMON] = "sock_common", 613 [PTR_TO_TCP_SOCK] = "tcp_sock", 614 [PTR_TO_TP_BUFFER] = "tp_buffer", 615 [PTR_TO_XDP_SOCK] = "xdp_sock", 616 [PTR_TO_BTF_ID] = "ptr_", 617 [PTR_TO_MEM] = "mem", 618 [PTR_TO_BUF] = "buf", 619 [PTR_TO_FUNC] = "func", 620 [PTR_TO_MAP_KEY] = "map_key", 621 [CONST_PTR_TO_DYNPTR] = "dynptr_ptr", 622 }; 623 624 if (type & PTR_MAYBE_NULL) { 625 if (base_type(type) == PTR_TO_BTF_ID) 626 strncpy(postfix, "or_null_", 16); 627 else 628 strncpy(postfix, "_or_null", 16); 629 } 630 631 snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s", 632 type & MEM_RDONLY ? "rdonly_" : "", 633 type & MEM_RINGBUF ? "ringbuf_" : "", 634 type & MEM_USER ? "user_" : "", 635 type & MEM_PERCPU ? "percpu_" : "", 636 type & MEM_RCU ? "rcu_" : "", 637 type & PTR_UNTRUSTED ? "untrusted_" : "", 638 type & PTR_TRUSTED ? "trusted_" : "" 639 ); 640 641 snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s", 642 prefix, str[base_type(type)], postfix); 643 return env->tmp_str_buf; 644 } 645 646 static char slot_type_char[] = { 647 [STACK_INVALID] = '?', 648 [STACK_SPILL] = 'r', 649 [STACK_MISC] = 'm', 650 [STACK_ZERO] = '0', 651 [STACK_DYNPTR] = 'd', 652 [STACK_ITER] = 'i', 653 }; 654 655 static void print_liveness(struct bpf_verifier_env *env, 656 enum bpf_reg_liveness live) 657 { 658 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) 659 verbose(env, "_"); 660 if (live & REG_LIVE_READ) 661 verbose(env, "r"); 662 if (live & REG_LIVE_WRITTEN) 663 verbose(env, "w"); 664 if (live & REG_LIVE_DONE) 665 verbose(env, "D"); 666 } 667 668 static int __get_spi(s32 off) 669 { 670 return (-off - 1) / BPF_REG_SIZE; 671 } 672 673 static struct bpf_func_state *func(struct bpf_verifier_env *env, 674 const struct bpf_reg_state *reg) 675 { 676 struct bpf_verifier_state *cur = env->cur_state; 677 678 return cur->frame[reg->frameno]; 679 } 680 681 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) 682 { 683 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; 684 685 /* We need to check that slots between [spi - nr_slots + 1, spi] are 686 * within [0, allocated_stack). 687 * 688 * Please note that the spi grows downwards. For example, a dynptr 689 * takes the size of two stack slots; the first slot will be at 690 * spi and the second slot will be at spi - 1. 
691 */ 692 return spi - nr_slots + 1 >= 0 && spi < allocated_slots; 693 } 694 695 static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 696 const char *obj_kind, int nr_slots) 697 { 698 int off, spi; 699 700 if (!tnum_is_const(reg->var_off)) { 701 verbose(env, "%s has to be at a constant offset\n", obj_kind); 702 return -EINVAL; 703 } 704 705 off = reg->off + reg->var_off.value; 706 if (off % BPF_REG_SIZE) { 707 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); 708 return -EINVAL; 709 } 710 711 spi = __get_spi(off); 712 if (spi + 1 < nr_slots) { 713 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); 714 return -EINVAL; 715 } 716 717 if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots)) 718 return -ERANGE; 719 return spi; 720 } 721 722 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 723 { 724 return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS); 725 } 726 727 static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots) 728 { 729 return stack_slot_obj_get_spi(env, reg, "iter", nr_slots); 730 } 731 732 static const char *btf_type_name(const struct btf *btf, u32 id) 733 { 734 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); 735 } 736 737 static const char *dynptr_type_str(enum bpf_dynptr_type type) 738 { 739 switch (type) { 740 case BPF_DYNPTR_TYPE_LOCAL: 741 return "local"; 742 case BPF_DYNPTR_TYPE_RINGBUF: 743 return "ringbuf"; 744 case BPF_DYNPTR_TYPE_SKB: 745 return "skb"; 746 case BPF_DYNPTR_TYPE_XDP: 747 return "xdp"; 748 case BPF_DYNPTR_TYPE_INVALID: 749 return "<invalid>"; 750 default: 751 WARN_ONCE(1, "unknown dynptr type %d\n", type); 752 return "<unknown>"; 753 } 754 } 755 756 static const char *iter_type_str(const struct btf *btf, u32 btf_id) 757 { 758 if (!btf || btf_id == 0) 759 return "<invalid>"; 760 761 /* we already validated that type is valid and has conforming name */ 762 return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1; 763 } 764 765 static const char *iter_state_str(enum bpf_iter_state state) 766 { 767 switch (state) { 768 case BPF_ITER_STATE_ACTIVE: 769 return "active"; 770 case BPF_ITER_STATE_DRAINED: 771 return "drained"; 772 case BPF_ITER_STATE_INVALID: 773 return "<invalid>"; 774 default: 775 WARN_ONCE(1, "unknown iter state %d\n", state); 776 return "<unknown>"; 777 } 778 } 779 780 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno) 781 { 782 env->scratched_regs |= 1U << regno; 783 } 784 785 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi) 786 { 787 env->scratched_stack_slots |= 1ULL << spi; 788 } 789 790 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno) 791 { 792 return (env->scratched_regs >> regno) & 1; 793 } 794 795 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno) 796 { 797 return (env->scratched_stack_slots >> regno) & 1; 798 } 799 800 static bool verifier_state_scratched(const struct bpf_verifier_env *env) 801 { 802 return env->scratched_regs || env->scratched_stack_slots; 803 } 804 805 static void mark_verifier_state_clean(struct bpf_verifier_env *env) 806 { 807 env->scratched_regs = 0U; 808 env->scratched_stack_slots = 0ULL; 809 } 810 811 /* Used for printing the entire verifier state. 
*/ 812 static void mark_verifier_state_scratched(struct bpf_verifier_env *env) 813 { 814 env->scratched_regs = ~0U; 815 env->scratched_stack_slots = ~0ULL; 816 } 817 818 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) 819 { 820 switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { 821 case DYNPTR_TYPE_LOCAL: 822 return BPF_DYNPTR_TYPE_LOCAL; 823 case DYNPTR_TYPE_RINGBUF: 824 return BPF_DYNPTR_TYPE_RINGBUF; 825 case DYNPTR_TYPE_SKB: 826 return BPF_DYNPTR_TYPE_SKB; 827 case DYNPTR_TYPE_XDP: 828 return BPF_DYNPTR_TYPE_XDP; 829 default: 830 return BPF_DYNPTR_TYPE_INVALID; 831 } 832 } 833 834 static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type) 835 { 836 switch (type) { 837 case BPF_DYNPTR_TYPE_LOCAL: 838 return DYNPTR_TYPE_LOCAL; 839 case BPF_DYNPTR_TYPE_RINGBUF: 840 return DYNPTR_TYPE_RINGBUF; 841 case BPF_DYNPTR_TYPE_SKB: 842 return DYNPTR_TYPE_SKB; 843 case BPF_DYNPTR_TYPE_XDP: 844 return DYNPTR_TYPE_XDP; 845 default: 846 return 0; 847 } 848 } 849 850 static bool dynptr_type_refcounted(enum bpf_dynptr_type type) 851 { 852 return type == BPF_DYNPTR_TYPE_RINGBUF; 853 } 854 855 static void __mark_dynptr_reg(struct bpf_reg_state *reg, 856 enum bpf_dynptr_type type, 857 bool first_slot, int dynptr_id); 858 859 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 860 struct bpf_reg_state *reg); 861 862 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env, 863 struct bpf_reg_state *sreg1, 864 struct bpf_reg_state *sreg2, 865 enum bpf_dynptr_type type) 866 { 867 int id = ++env->id_gen; 868 869 __mark_dynptr_reg(sreg1, type, true, id); 870 __mark_dynptr_reg(sreg2, type, false, id); 871 } 872 873 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env, 874 struct bpf_reg_state *reg, 875 enum bpf_dynptr_type type) 876 { 877 __mark_dynptr_reg(reg, type, true, ++env->id_gen); 878 } 879 880 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, 881 struct bpf_func_state *state, int spi); 882 883 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 884 enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id) 885 { 886 struct bpf_func_state *state = func(env, reg); 887 enum bpf_dynptr_type type; 888 int spi, i, err; 889 890 spi = dynptr_get_spi(env, reg); 891 if (spi < 0) 892 return spi; 893 894 /* We cannot assume both spi and spi - 1 belong to the same dynptr, 895 * hence we need to call destroy_if_dynptr_stack_slot twice for both, 896 * to ensure that for the following example: 897 * [d1][d1][d2][d2] 898 * spi 3 2 1 0 899 * So marking spi = 2 should lead to destruction of both d1 and d2. In 900 * case they do belong to same dynptr, second call won't see slot_type 901 * as STACK_DYNPTR and will simply skip destruction. 
902 */ 903 err = destroy_if_dynptr_stack_slot(env, state, spi); 904 if (err) 905 return err; 906 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); 907 if (err) 908 return err; 909 910 for (i = 0; i < BPF_REG_SIZE; i++) { 911 state->stack[spi].slot_type[i] = STACK_DYNPTR; 912 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; 913 } 914 915 type = arg_to_dynptr_type(arg_type); 916 if (type == BPF_DYNPTR_TYPE_INVALID) 917 return -EINVAL; 918 919 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, 920 &state->stack[spi - 1].spilled_ptr, type); 921 922 if (dynptr_type_refcounted(type)) { 923 /* The id is used to track proper releasing */ 924 int id; 925 926 if (clone_ref_obj_id) 927 id = clone_ref_obj_id; 928 else 929 id = acquire_reference_state(env, insn_idx); 930 931 if (id < 0) 932 return id; 933 934 state->stack[spi].spilled_ptr.ref_obj_id = id; 935 state->stack[spi - 1].spilled_ptr.ref_obj_id = id; 936 } 937 938 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 939 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; 940 941 return 0; 942 } 943 944 static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi) 945 { 946 int i; 947 948 for (i = 0; i < BPF_REG_SIZE; i++) { 949 state->stack[spi].slot_type[i] = STACK_INVALID; 950 state->stack[spi - 1].slot_type[i] = STACK_INVALID; 951 } 952 953 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); 954 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); 955 956 /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot? 957 * 958 * While we don't allow reading STACK_INVALID, it is still possible to 959 * do <8 byte writes marking some but not all slots as STACK_MISC. Then, 960 * helpers or insns can do partial read of that part without failing, 961 * but check_stack_range_initialized, check_stack_read_var_off, and 962 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of 963 * the slot conservatively. Hence we need to prevent those liveness 964 * marking walks. 965 * 966 * This was not a problem before because STACK_INVALID is only set by 967 * default (where the default reg state has its reg->parent as NULL), or 968 * in clean_live_states after REG_LIVE_DONE (at which point 969 * mark_reg_read won't walk reg->parent chain), but not randomly during 970 * verifier state exploration (like we did above). Hence, for our case 971 * parentage chain will still be live (i.e. reg->parent may be 972 * non-NULL), while earlier reg->parent was NULL, so we need 973 * REG_LIVE_WRITTEN to screen off read marker propagation when it is 974 * done later on reads or by mark_dynptr_read as well to unnecessary 975 * mark registers in verifier state. 976 */ 977 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 978 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; 979 } 980 981 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 982 { 983 struct bpf_func_state *state = func(env, reg); 984 int spi, ref_obj_id, i; 985 986 spi = dynptr_get_spi(env, reg); 987 if (spi < 0) 988 return spi; 989 990 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { 991 invalidate_dynptr(env, state, spi); 992 return 0; 993 } 994 995 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; 996 997 /* If the dynptr has a ref_obj_id, then we need to invalidate 998 * two things: 999 * 1000 * 1) Any dynptrs with a matching ref_obj_id (clones) 1001 * 2) Any slices derived from this dynptr. 
1002 */ 1003 1004 /* Invalidate any slices associated with this dynptr */ 1005 WARN_ON_ONCE(release_reference(env, ref_obj_id)); 1006 1007 /* Invalidate any dynptr clones */ 1008 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { 1009 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) 1010 continue; 1011 1012 /* it should always be the case that if the ref obj id 1013 * matches then the stack slot also belongs to a 1014 * dynptr 1015 */ 1016 if (state->stack[i].slot_type[0] != STACK_DYNPTR) { 1017 verbose(env, "verifier internal error: misconfigured ref_obj_id\n"); 1018 return -EFAULT; 1019 } 1020 if (state->stack[i].spilled_ptr.dynptr.first_slot) 1021 invalidate_dynptr(env, state, i); 1022 } 1023 1024 return 0; 1025 } 1026 1027 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1028 struct bpf_reg_state *reg); 1029 1030 static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1031 { 1032 if (!env->allow_ptr_leaks) 1033 __mark_reg_not_init(env, reg); 1034 else 1035 __mark_reg_unknown(env, reg); 1036 } 1037 1038 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, 1039 struct bpf_func_state *state, int spi) 1040 { 1041 struct bpf_func_state *fstate; 1042 struct bpf_reg_state *dreg; 1043 int i, dynptr_id; 1044 1045 /* We always ensure that STACK_DYNPTR is never set partially, 1046 * hence just checking for slot_type[0] is enough. This is 1047 * different for STACK_SPILL, where it may be only set for 1048 * 1 byte, so code has to use is_spilled_reg. 1049 */ 1050 if (state->stack[spi].slot_type[0] != STACK_DYNPTR) 1051 return 0; 1052 1053 /* Reposition spi to first slot */ 1054 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) 1055 spi = spi + 1; 1056 1057 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { 1058 verbose(env, "cannot overwrite referenced dynptr\n"); 1059 return -EINVAL; 1060 } 1061 1062 mark_stack_slot_scratched(env, spi); 1063 mark_stack_slot_scratched(env, spi - 1); 1064 1065 /* Writing partially to one dynptr stack slot destroys both. */ 1066 for (i = 0; i < BPF_REG_SIZE; i++) { 1067 state->stack[spi].slot_type[i] = STACK_INVALID; 1068 state->stack[spi - 1].slot_type[i] = STACK_INVALID; 1069 } 1070 1071 dynptr_id = state->stack[spi].spilled_ptr.id; 1072 /* Invalidate any slices associated with this dynptr */ 1073 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ 1074 /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */ 1075 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) 1076 continue; 1077 if (dreg->dynptr_id == dynptr_id) 1078 mark_reg_invalid(env, dreg); 1079 })); 1080 1081 /* Do not release reference state, we are destroying dynptr on stack, 1082 * not using some helper to release it. Just reset register. 1083 */ 1084 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); 1085 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); 1086 1087 /* Same reason as unmark_stack_slots_dynptr above */ 1088 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1089 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; 1090 1091 return 0; 1092 } 1093 1094 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1095 { 1096 int spi; 1097 1098 if (reg->type == CONST_PTR_TO_DYNPTR) 1099 return false; 1100 1101 spi = dynptr_get_spi(env, reg); 1102 1103 /* -ERANGE (i.e. 
spi not falling into allocated stack slots) isn't an 1104 * error because this just means the stack state hasn't been updated yet. 1105 * We will do check_mem_access to check and update stack bounds later. 1106 */ 1107 if (spi < 0 && spi != -ERANGE) 1108 return false; 1109 1110 /* We don't need to check if the stack slots are marked by previous 1111 * dynptr initializations because we allow overwriting existing unreferenced 1112 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls 1113 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are 1114 * touching are completely destructed before we reinitialize them for a new 1115 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early 1116 * instead of delaying it until the end where the user will get "Unreleased 1117 * reference" error. 1118 */ 1119 return true; 1120 } 1121 1122 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1123 { 1124 struct bpf_func_state *state = func(env, reg); 1125 int i, spi; 1126 1127 /* This already represents first slot of initialized bpf_dynptr. 1128 * 1129 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to 1130 * check_func_arg_reg_off's logic, so we don't need to check its 1131 * offset and alignment. 1132 */ 1133 if (reg->type == CONST_PTR_TO_DYNPTR) 1134 return true; 1135 1136 spi = dynptr_get_spi(env, reg); 1137 if (spi < 0) 1138 return false; 1139 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) 1140 return false; 1141 1142 for (i = 0; i < BPF_REG_SIZE; i++) { 1143 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || 1144 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) 1145 return false; 1146 } 1147 1148 return true; 1149 } 1150 1151 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 1152 enum bpf_arg_type arg_type) 1153 { 1154 struct bpf_func_state *state = func(env, reg); 1155 enum bpf_dynptr_type dynptr_type; 1156 int spi; 1157 1158 /* ARG_PTR_TO_DYNPTR takes any type of dynptr */ 1159 if (arg_type == ARG_PTR_TO_DYNPTR) 1160 return true; 1161 1162 dynptr_type = arg_to_dynptr_type(arg_type); 1163 if (reg->type == CONST_PTR_TO_DYNPTR) { 1164 return reg->dynptr.type == dynptr_type; 1165 } else { 1166 spi = dynptr_get_spi(env, reg); 1167 if (spi < 0) 1168 return false; 1169 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; 1170 } 1171 } 1172 1173 static void __mark_reg_known_zero(struct bpf_reg_state *reg); 1174 1175 static int mark_stack_slots_iter(struct bpf_verifier_env *env, 1176 struct bpf_reg_state *reg, int insn_idx, 1177 struct btf *btf, u32 btf_id, int nr_slots) 1178 { 1179 struct bpf_func_state *state = func(env, reg); 1180 int spi, i, j, id; 1181 1182 spi = iter_get_spi(env, reg, nr_slots); 1183 if (spi < 0) 1184 return spi; 1185 1186 id = acquire_reference_state(env, insn_idx); 1187 if (id < 0) 1188 return id; 1189 1190 for (i = 0; i < nr_slots; i++) { 1191 struct bpf_stack_state *slot = &state->stack[spi - i]; 1192 struct bpf_reg_state *st = &slot->spilled_ptr; 1193 1194 __mark_reg_known_zero(st); 1195 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ 1196 st->live |= REG_LIVE_WRITTEN; 1197 st->ref_obj_id = i == 0 ? 
id : 0; 1198 st->iter.btf = btf; 1199 st->iter.btf_id = btf_id; 1200 st->iter.state = BPF_ITER_STATE_ACTIVE; 1201 st->iter.depth = 0; 1202 1203 for (j = 0; j < BPF_REG_SIZE; j++) 1204 slot->slot_type[j] = STACK_ITER; 1205 1206 mark_stack_slot_scratched(env, spi - i); 1207 } 1208 1209 return 0; 1210 } 1211 1212 static int unmark_stack_slots_iter(struct bpf_verifier_env *env, 1213 struct bpf_reg_state *reg, int nr_slots) 1214 { 1215 struct bpf_func_state *state = func(env, reg); 1216 int spi, i, j; 1217 1218 spi = iter_get_spi(env, reg, nr_slots); 1219 if (spi < 0) 1220 return spi; 1221 1222 for (i = 0; i < nr_slots; i++) { 1223 struct bpf_stack_state *slot = &state->stack[spi - i]; 1224 struct bpf_reg_state *st = &slot->spilled_ptr; 1225 1226 if (i == 0) 1227 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); 1228 1229 __mark_reg_not_init(env, st); 1230 1231 /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */ 1232 st->live |= REG_LIVE_WRITTEN; 1233 1234 for (j = 0; j < BPF_REG_SIZE; j++) 1235 slot->slot_type[j] = STACK_INVALID; 1236 1237 mark_stack_slot_scratched(env, spi - i); 1238 } 1239 1240 return 0; 1241 } 1242 1243 static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env, 1244 struct bpf_reg_state *reg, int nr_slots) 1245 { 1246 struct bpf_func_state *state = func(env, reg); 1247 int spi, i, j; 1248 1249 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we 1250 * will do check_mem_access to check and update stack bounds later, so 1251 * return true for that case. 1252 */ 1253 spi = iter_get_spi(env, reg, nr_slots); 1254 if (spi == -ERANGE) 1255 return true; 1256 if (spi < 0) 1257 return false; 1258 1259 for (i = 0; i < nr_slots; i++) { 1260 struct bpf_stack_state *slot = &state->stack[spi - i]; 1261 1262 for (j = 0; j < BPF_REG_SIZE; j++) 1263 if (slot->slot_type[j] == STACK_ITER) 1264 return false; 1265 } 1266 1267 return true; 1268 } 1269 1270 static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 1271 struct btf *btf, u32 btf_id, int nr_slots) 1272 { 1273 struct bpf_func_state *state = func(env, reg); 1274 int spi, i, j; 1275 1276 spi = iter_get_spi(env, reg, nr_slots); 1277 if (spi < 0) 1278 return false; 1279 1280 for (i = 0; i < nr_slots; i++) { 1281 struct bpf_stack_state *slot = &state->stack[spi - i]; 1282 struct bpf_reg_state *st = &slot->spilled_ptr; 1283 1284 /* only main (first) slot has ref_obj_id set */ 1285 if (i == 0 && !st->ref_obj_id) 1286 return false; 1287 if (i != 0 && st->ref_obj_id) 1288 return false; 1289 if (st->iter.btf != btf || st->iter.btf_id != btf_id) 1290 return false; 1291 1292 for (j = 0; j < BPF_REG_SIZE; j++) 1293 if (slot->slot_type[j] != STACK_ITER) 1294 return false; 1295 } 1296 1297 return true; 1298 } 1299 1300 /* Check if given stack slot is "special": 1301 * - spilled register state (STACK_SPILL); 1302 * - dynptr state (STACK_DYNPTR); 1303 * - iter state (STACK_ITER). 1304 */ 1305 static bool is_stack_slot_special(const struct bpf_stack_state *stack) 1306 { 1307 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; 1308 1309 switch (type) { 1310 case STACK_SPILL: 1311 case STACK_DYNPTR: 1312 case STACK_ITER: 1313 return true; 1314 case STACK_INVALID: 1315 case STACK_MISC: 1316 case STACK_ZERO: 1317 return false; 1318 default: 1319 WARN_ONCE(1, "unknown stack slot type %d\n", type); 1320 return true; 1321 } 1322 } 1323 1324 /* The reg state of a pointer or a bounded scalar was saved when 1325 * it was spilled to the stack. 
1326 */ 1327 static bool is_spilled_reg(const struct bpf_stack_state *stack) 1328 { 1329 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; 1330 } 1331 1332 static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack) 1333 { 1334 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && 1335 stack->spilled_ptr.type == SCALAR_VALUE; 1336 } 1337 1338 static void scrub_spilled_slot(u8 *stype) 1339 { 1340 if (*stype != STACK_INVALID) 1341 *stype = STACK_MISC; 1342 } 1343 1344 static void print_verifier_state(struct bpf_verifier_env *env, 1345 const struct bpf_func_state *state, 1346 bool print_all) 1347 { 1348 const struct bpf_reg_state *reg; 1349 enum bpf_reg_type t; 1350 int i; 1351 1352 if (state->frameno) 1353 verbose(env, " frame%d:", state->frameno); 1354 for (i = 0; i < MAX_BPF_REG; i++) { 1355 reg = &state->regs[i]; 1356 t = reg->type; 1357 if (t == NOT_INIT) 1358 continue; 1359 if (!print_all && !reg_scratched(env, i)) 1360 continue; 1361 verbose(env, " R%d", i); 1362 print_liveness(env, reg->live); 1363 verbose(env, "="); 1364 if (t == SCALAR_VALUE && reg->precise) 1365 verbose(env, "P"); 1366 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 1367 tnum_is_const(reg->var_off)) { 1368 /* reg->off should be 0 for SCALAR_VALUE */ 1369 verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t)); 1370 verbose(env, "%lld", reg->var_off.value + reg->off); 1371 } else { 1372 const char *sep = ""; 1373 1374 verbose(env, "%s", reg_type_str(env, t)); 1375 if (base_type(t) == PTR_TO_BTF_ID) 1376 verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id)); 1377 verbose(env, "("); 1378 /* 1379 * _a stands for append, was shortened to avoid multiline statements below. 1380 * This macro is used to output a comma separated list of attributes. 1381 */ 1382 #define verbose_a(fmt, ...) 
({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; }) 1383 1384 if (reg->id) 1385 verbose_a("id=%d", reg->id); 1386 if (reg->ref_obj_id) 1387 verbose_a("ref_obj_id=%d", reg->ref_obj_id); 1388 if (type_is_non_owning_ref(reg->type)) 1389 verbose_a("%s", "non_own_ref"); 1390 if (t != SCALAR_VALUE) 1391 verbose_a("off=%d", reg->off); 1392 if (type_is_pkt_pointer(t)) 1393 verbose_a("r=%d", reg->range); 1394 else if (base_type(t) == CONST_PTR_TO_MAP || 1395 base_type(t) == PTR_TO_MAP_KEY || 1396 base_type(t) == PTR_TO_MAP_VALUE) 1397 verbose_a("ks=%d,vs=%d", 1398 reg->map_ptr->key_size, 1399 reg->map_ptr->value_size); 1400 if (tnum_is_const(reg->var_off)) { 1401 /* Typically an immediate SCALAR_VALUE, but 1402 * could be a pointer whose offset is too big 1403 * for reg->off 1404 */ 1405 verbose_a("imm=%llx", reg->var_off.value); 1406 } else { 1407 if (reg->smin_value != reg->umin_value && 1408 reg->smin_value != S64_MIN) 1409 verbose_a("smin=%lld", (long long)reg->smin_value); 1410 if (reg->smax_value != reg->umax_value && 1411 reg->smax_value != S64_MAX) 1412 verbose_a("smax=%lld", (long long)reg->smax_value); 1413 if (reg->umin_value != 0) 1414 verbose_a("umin=%llu", (unsigned long long)reg->umin_value); 1415 if (reg->umax_value != U64_MAX) 1416 verbose_a("umax=%llu", (unsigned long long)reg->umax_value); 1417 if (!tnum_is_unknown(reg->var_off)) { 1418 char tn_buf[48]; 1419 1420 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1421 verbose_a("var_off=%s", tn_buf); 1422 } 1423 if (reg->s32_min_value != reg->smin_value && 1424 reg->s32_min_value != S32_MIN) 1425 verbose_a("s32_min=%d", (int)(reg->s32_min_value)); 1426 if (reg->s32_max_value != reg->smax_value && 1427 reg->s32_max_value != S32_MAX) 1428 verbose_a("s32_max=%d", (int)(reg->s32_max_value)); 1429 if (reg->u32_min_value != reg->umin_value && 1430 reg->u32_min_value != U32_MIN) 1431 verbose_a("u32_min=%d", (int)(reg->u32_min_value)); 1432 if (reg->u32_max_value != reg->umax_value && 1433 reg->u32_max_value != U32_MAX) 1434 verbose_a("u32_max=%d", (int)(reg->u32_max_value)); 1435 } 1436 #undef verbose_a 1437 1438 verbose(env, ")"); 1439 } 1440 } 1441 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 1442 char types_buf[BPF_REG_SIZE + 1]; 1443 bool valid = false; 1444 int j; 1445 1446 for (j = 0; j < BPF_REG_SIZE; j++) { 1447 if (state->stack[i].slot_type[j] != STACK_INVALID) 1448 valid = true; 1449 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]]; 1450 } 1451 types_buf[BPF_REG_SIZE] = 0; 1452 if (!valid) 1453 continue; 1454 if (!print_all && !stack_slot_scratched(env, i)) 1455 continue; 1456 switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) { 1457 case STACK_SPILL: 1458 reg = &state->stack[i].spilled_ptr; 1459 t = reg->type; 1460 1461 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1462 print_liveness(env, reg->live); 1463 verbose(env, "=%s", t == SCALAR_VALUE ? 
"" : reg_type_str(env, t)); 1464 if (t == SCALAR_VALUE && reg->precise) 1465 verbose(env, "P"); 1466 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) 1467 verbose(env, "%lld", reg->var_off.value + reg->off); 1468 break; 1469 case STACK_DYNPTR: 1470 i += BPF_DYNPTR_NR_SLOTS - 1; 1471 reg = &state->stack[i].spilled_ptr; 1472 1473 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1474 print_liveness(env, reg->live); 1475 verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type)); 1476 if (reg->ref_obj_id) 1477 verbose(env, "(ref_id=%d)", reg->ref_obj_id); 1478 break; 1479 case STACK_ITER: 1480 /* only main slot has ref_obj_id set; skip others */ 1481 reg = &state->stack[i].spilled_ptr; 1482 if (!reg->ref_obj_id) 1483 continue; 1484 1485 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1486 print_liveness(env, reg->live); 1487 verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)", 1488 iter_type_str(reg->iter.btf, reg->iter.btf_id), 1489 reg->ref_obj_id, iter_state_str(reg->iter.state), 1490 reg->iter.depth); 1491 break; 1492 case STACK_MISC: 1493 case STACK_ZERO: 1494 default: 1495 reg = &state->stack[i].spilled_ptr; 1496 1497 for (j = 0; j < BPF_REG_SIZE; j++) 1498 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]]; 1499 types_buf[BPF_REG_SIZE] = 0; 1500 1501 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1502 print_liveness(env, reg->live); 1503 verbose(env, "=%s", types_buf); 1504 break; 1505 } 1506 } 1507 if (state->acquired_refs && state->refs[0].id) { 1508 verbose(env, " refs=%d", state->refs[0].id); 1509 for (i = 1; i < state->acquired_refs; i++) 1510 if (state->refs[i].id) 1511 verbose(env, ",%d", state->refs[i].id); 1512 } 1513 if (state->in_callback_fn) 1514 verbose(env, " cb"); 1515 if (state->in_async_callback_fn) 1516 verbose(env, " async_cb"); 1517 verbose(env, "\n"); 1518 if (!print_all) 1519 mark_verifier_state_clean(env); 1520 } 1521 1522 static inline u32 vlog_alignment(u32 pos) 1523 { 1524 return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT), 1525 BPF_LOG_MIN_ALIGNMENT) - pos - 1; 1526 } 1527 1528 static void print_insn_state(struct bpf_verifier_env *env, 1529 const struct bpf_func_state *state) 1530 { 1531 if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) { 1532 /* remove new line character */ 1533 bpf_vlog_reset(&env->log, env->prev_log_pos - 1); 1534 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' '); 1535 } else { 1536 verbose(env, "%d:", env->insn_idx); 1537 } 1538 print_verifier_state(env, state, false); 1539 } 1540 1541 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too 1542 * small to hold src. This is different from krealloc since we don't want to preserve 1543 * the contents of dst. 1544 * 1545 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could 1546 * not be allocated. 1547 */ 1548 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags) 1549 { 1550 size_t alloc_bytes; 1551 void *orig = dst; 1552 size_t bytes; 1553 1554 if (ZERO_OR_NULL_PTR(src)) 1555 goto out; 1556 1557 if (unlikely(check_mul_overflow(n, size, &bytes))) 1558 return NULL; 1559 1560 alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes)); 1561 dst = krealloc(orig, alloc_bytes, flags); 1562 if (!dst) { 1563 kfree(orig); 1564 return NULL; 1565 } 1566 1567 memcpy(dst, src, bytes); 1568 out: 1569 return dst ? dst : ZERO_SIZE_PTR; 1570 } 1571 1572 /* resize an array from old_n items to new_n items. 
the array is reallocated if it's too 1573 * small to hold new_n items. new items are zeroed out if the array grows. 1574 * 1575 * Contrary to krealloc_array, does not free arr if new_n is zero. 1576 */ 1577 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size) 1578 { 1579 size_t alloc_size; 1580 void *new_arr; 1581 1582 if (!new_n || old_n == new_n) 1583 goto out; 1584 1585 alloc_size = kmalloc_size_roundup(size_mul(new_n, size)); 1586 new_arr = krealloc(arr, alloc_size, GFP_KERNEL); 1587 if (!new_arr) { 1588 kfree(arr); 1589 return NULL; 1590 } 1591 arr = new_arr; 1592 1593 if (new_n > old_n) 1594 memset(arr + old_n * size, 0, (new_n - old_n) * size); 1595 1596 out: 1597 return arr ? arr : ZERO_SIZE_PTR; 1598 } 1599 1600 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src) 1601 { 1602 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, 1603 sizeof(struct bpf_reference_state), GFP_KERNEL); 1604 if (!dst->refs) 1605 return -ENOMEM; 1606 1607 dst->acquired_refs = src->acquired_refs; 1608 return 0; 1609 } 1610 1611 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) 1612 { 1613 size_t n = src->allocated_stack / BPF_REG_SIZE; 1614 1615 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), 1616 GFP_KERNEL); 1617 if (!dst->stack) 1618 return -ENOMEM; 1619 1620 dst->allocated_stack = src->allocated_stack; 1621 return 0; 1622 } 1623 1624 static int resize_reference_state(struct bpf_func_state *state, size_t n) 1625 { 1626 state->refs = realloc_array(state->refs, state->acquired_refs, n, 1627 sizeof(struct bpf_reference_state)); 1628 if (!state->refs) 1629 return -ENOMEM; 1630 1631 state->acquired_refs = n; 1632 return 0; 1633 } 1634 1635 static int grow_stack_state(struct bpf_func_state *state, int size) 1636 { 1637 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; 1638 1639 if (old_n >= n) 1640 return 0; 1641 1642 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); 1643 if (!state->stack) 1644 return -ENOMEM; 1645 1646 state->allocated_stack = size; 1647 return 0; 1648 } 1649 1650 /* Acquire a pointer id from the env and update the state->refs to include 1651 * this new pointer reference. 1652 * On success, returns a valid pointer id to associate with the register 1653 * On failure, returns a negative errno. 1654 */ 1655 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) 1656 { 1657 struct bpf_func_state *state = cur_func(env); 1658 int new_ofs = state->acquired_refs; 1659 int id, err; 1660 1661 err = resize_reference_state(state, state->acquired_refs + 1); 1662 if (err) 1663 return err; 1664 id = ++env->id_gen; 1665 state->refs[new_ofs].id = id; 1666 state->refs[new_ofs].insn_idx = insn_idx; 1667 state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0; 1668 1669 return id; 1670 } 1671 1672 /* release function corresponding to acquire_reference_state(). Idempotent. 
*/ 1673 static int release_reference_state(struct bpf_func_state *state, int ptr_id) 1674 { 1675 int i, last_idx; 1676 1677 last_idx = state->acquired_refs - 1; 1678 for (i = 0; i < state->acquired_refs; i++) { 1679 if (state->refs[i].id == ptr_id) { 1680 /* Cannot release caller references in callbacks */ 1681 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) 1682 return -EINVAL; 1683 if (last_idx && i != last_idx) 1684 memcpy(&state->refs[i], &state->refs[last_idx], 1685 sizeof(*state->refs)); 1686 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); 1687 state->acquired_refs--; 1688 return 0; 1689 } 1690 } 1691 return -EINVAL; 1692 } 1693 1694 static void free_func_state(struct bpf_func_state *state) 1695 { 1696 if (!state) 1697 return; 1698 kfree(state->refs); 1699 kfree(state->stack); 1700 kfree(state); 1701 } 1702 1703 static void clear_jmp_history(struct bpf_verifier_state *state) 1704 { 1705 kfree(state->jmp_history); 1706 state->jmp_history = NULL; 1707 state->jmp_history_cnt = 0; 1708 } 1709 1710 static void free_verifier_state(struct bpf_verifier_state *state, 1711 bool free_self) 1712 { 1713 int i; 1714 1715 for (i = 0; i <= state->curframe; i++) { 1716 free_func_state(state->frame[i]); 1717 state->frame[i] = NULL; 1718 } 1719 clear_jmp_history(state); 1720 if (free_self) 1721 kfree(state); 1722 } 1723 1724 /* copy verifier state from src to dst growing dst stack space 1725 * when necessary to accommodate larger src stack 1726 */ 1727 static int copy_func_state(struct bpf_func_state *dst, 1728 const struct bpf_func_state *src) 1729 { 1730 int err; 1731 1732 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); 1733 err = copy_reference_state(dst, src); 1734 if (err) 1735 return err; 1736 return copy_stack_state(dst, src); 1737 } 1738 1739 static int copy_verifier_state(struct bpf_verifier_state *dst_state, 1740 const struct bpf_verifier_state *src) 1741 { 1742 struct bpf_func_state *dst; 1743 int i, err; 1744 1745 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, 1746 src->jmp_history_cnt, sizeof(struct bpf_idx_pair), 1747 GFP_USER); 1748 if (!dst_state->jmp_history) 1749 return -ENOMEM; 1750 dst_state->jmp_history_cnt = src->jmp_history_cnt; 1751 1752 /* if dst has more stack frames then src frame, free them */ 1753 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { 1754 free_func_state(dst_state->frame[i]); 1755 dst_state->frame[i] = NULL; 1756 } 1757 dst_state->speculative = src->speculative; 1758 dst_state->active_rcu_lock = src->active_rcu_lock; 1759 dst_state->curframe = src->curframe; 1760 dst_state->active_lock.ptr = src->active_lock.ptr; 1761 dst_state->active_lock.id = src->active_lock.id; 1762 dst_state->branches = src->branches; 1763 dst_state->parent = src->parent; 1764 dst_state->first_insn_idx = src->first_insn_idx; 1765 dst_state->last_insn_idx = src->last_insn_idx; 1766 for (i = 0; i <= src->curframe; i++) { 1767 dst = dst_state->frame[i]; 1768 if (!dst) { 1769 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 1770 if (!dst) 1771 return -ENOMEM; 1772 dst_state->frame[i] = dst; 1773 } 1774 err = copy_func_state(dst, src->frame[i]); 1775 if (err) 1776 return err; 1777 } 1778 return 0; 1779 } 1780 1781 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 1782 { 1783 while (st) { 1784 u32 br = --st->branches; 1785 1786 /* WARN_ON(br > 1) technically makes sense here, 1787 * but see comment in push_stack(), hence: 1788 */ 1789 WARN_ONCE((int)br < 0, 1790 
"BUG update_branch_counts:branches_to_explore=%d\n", 1791 br); 1792 if (br) 1793 break; 1794 st = st->parent; 1795 } 1796 } 1797 1798 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 1799 int *insn_idx, bool pop_log) 1800 { 1801 struct bpf_verifier_state *cur = env->cur_state; 1802 struct bpf_verifier_stack_elem *elem, *head = env->head; 1803 int err; 1804 1805 if (env->head == NULL) 1806 return -ENOENT; 1807 1808 if (cur) { 1809 err = copy_verifier_state(cur, &head->st); 1810 if (err) 1811 return err; 1812 } 1813 if (pop_log) 1814 bpf_vlog_reset(&env->log, head->log_pos); 1815 if (insn_idx) 1816 *insn_idx = head->insn_idx; 1817 if (prev_insn_idx) 1818 *prev_insn_idx = head->prev_insn_idx; 1819 elem = head->next; 1820 free_verifier_state(&head->st, false); 1821 kfree(head); 1822 env->head = elem; 1823 env->stack_size--; 1824 return 0; 1825 } 1826 1827 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 1828 int insn_idx, int prev_insn_idx, 1829 bool speculative) 1830 { 1831 struct bpf_verifier_state *cur = env->cur_state; 1832 struct bpf_verifier_stack_elem *elem; 1833 int err; 1834 1835 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 1836 if (!elem) 1837 goto err; 1838 1839 elem->insn_idx = insn_idx; 1840 elem->prev_insn_idx = prev_insn_idx; 1841 elem->next = env->head; 1842 elem->log_pos = env->log.end_pos; 1843 env->head = elem; 1844 env->stack_size++; 1845 err = copy_verifier_state(&elem->st, cur); 1846 if (err) 1847 goto err; 1848 elem->st.speculative |= speculative; 1849 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 1850 verbose(env, "The sequence of %d jumps is too complex.\n", 1851 env->stack_size); 1852 goto err; 1853 } 1854 if (elem->st.parent) { 1855 ++elem->st.parent->branches; 1856 /* WARN_ON(branches > 2) technically makes sense here, 1857 * but 1858 * 1. speculative states will bump 'branches' for non-branch 1859 * instructions 1860 * 2. is_state_visited() heuristics may decide not to create 1861 * a new state for a sequence of branches and all such current 1862 * and cloned states will be pointing to a single parent state 1863 * which might have large 'branches' count. 1864 */ 1865 } 1866 return &elem->st; 1867 err: 1868 free_verifier_state(env->cur_state, true); 1869 env->cur_state = NULL; 1870 /* pop all elements and return */ 1871 while (!pop_stack(env, NULL, NULL, false)); 1872 return NULL; 1873 } 1874 1875 #define CALLER_SAVED_REGS 6 1876 static const int caller_saved[CALLER_SAVED_REGS] = { 1877 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 1878 }; 1879 1880 /* This helper doesn't clear reg->id */ 1881 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm) 1882 { 1883 reg->var_off = tnum_const(imm); 1884 reg->smin_value = (s64)imm; 1885 reg->smax_value = (s64)imm; 1886 reg->umin_value = imm; 1887 reg->umax_value = imm; 1888 1889 reg->s32_min_value = (s32)imm; 1890 reg->s32_max_value = (s32)imm; 1891 reg->u32_min_value = (u32)imm; 1892 reg->u32_max_value = (u32)imm; 1893 } 1894 1895 /* Mark the unknown part of a register (variable offset or scalar value) as 1896 * known to have the value @imm. 
1897 */ 1898 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 1899 { 1900 /* Clear off and union(map_ptr, range) */ 1901 memset(((u8 *)reg) + sizeof(reg->type), 0, 1902 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); 1903 reg->id = 0; 1904 reg->ref_obj_id = 0; 1905 ___mark_reg_known(reg, imm); 1906 } 1907 1908 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) 1909 { 1910 reg->var_off = tnum_const_subreg(reg->var_off, imm); 1911 reg->s32_min_value = (s32)imm; 1912 reg->s32_max_value = (s32)imm; 1913 reg->u32_min_value = (u32)imm; 1914 reg->u32_max_value = (u32)imm; 1915 } 1916 1917 /* Mark the 'variable offset' part of a register as zero. This should be 1918 * used only on registers holding a pointer type. 1919 */ 1920 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 1921 { 1922 __mark_reg_known(reg, 0); 1923 } 1924 1925 static void __mark_reg_const_zero(struct bpf_reg_state *reg) 1926 { 1927 __mark_reg_known(reg, 0); 1928 reg->type = SCALAR_VALUE; 1929 } 1930 1931 static void mark_reg_known_zero(struct bpf_verifier_env *env, 1932 struct bpf_reg_state *regs, u32 regno) 1933 { 1934 if (WARN_ON(regno >= MAX_BPF_REG)) { 1935 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 1936 /* Something bad happened, let's kill all regs */ 1937 for (regno = 0; regno < MAX_BPF_REG; regno++) 1938 __mark_reg_not_init(env, regs + regno); 1939 return; 1940 } 1941 __mark_reg_known_zero(regs + regno); 1942 } 1943 1944 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type, 1945 bool first_slot, int dynptr_id) 1946 { 1947 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for 1948 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply 1949 * set it unconditionally as it is ignored for STACK_DYNPTR anyway. 1950 */ 1951 __mark_reg_known_zero(reg); 1952 reg->type = CONST_PTR_TO_DYNPTR; 1953 /* Give each dynptr a unique id to uniquely associate slices to it. */ 1954 reg->id = dynptr_id; 1955 reg->dynptr.type = type; 1956 reg->dynptr.first_slot = first_slot; 1957 } 1958 1959 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) 1960 { 1961 if (base_type(reg->type) == PTR_TO_MAP_VALUE) { 1962 const struct bpf_map *map = reg->map_ptr; 1963 1964 if (map->inner_map_meta) { 1965 reg->type = CONST_PTR_TO_MAP; 1966 reg->map_ptr = map->inner_map_meta; 1967 /* transfer reg's id which is unique for every map_lookup_elem 1968 * as UID of the inner map. 
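 * This keeps map values looked up from different inner maps distinct
 * during verification, which matters when the inner map carries a
 * bpf_timer.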
1969 */ 1970 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) 1971 reg->map_uid = reg->id; 1972 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { 1973 reg->type = PTR_TO_XDP_SOCK; 1974 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || 1975 map->map_type == BPF_MAP_TYPE_SOCKHASH) { 1976 reg->type = PTR_TO_SOCKET; 1977 } else { 1978 reg->type = PTR_TO_MAP_VALUE; 1979 } 1980 return; 1981 } 1982 1983 reg->type &= ~PTR_MAYBE_NULL; 1984 } 1985 1986 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno, 1987 struct btf_field_graph_root *ds_head) 1988 { 1989 __mark_reg_known_zero(®s[regno]); 1990 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC; 1991 regs[regno].btf = ds_head->btf; 1992 regs[regno].btf_id = ds_head->value_btf_id; 1993 regs[regno].off = ds_head->node_offset; 1994 } 1995 1996 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 1997 { 1998 return type_is_pkt_pointer(reg->type); 1999 } 2000 2001 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 2002 { 2003 return reg_is_pkt_pointer(reg) || 2004 reg->type == PTR_TO_PACKET_END; 2005 } 2006 2007 static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg) 2008 { 2009 return base_type(reg->type) == PTR_TO_MEM && 2010 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); 2011 } 2012 2013 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 2014 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 2015 enum bpf_reg_type which) 2016 { 2017 /* The register can already have a range from prior markings. 2018 * This is fine as long as it hasn't been advanced from its 2019 * origin. 2020 */ 2021 return reg->type == which && 2022 reg->id == 0 && 2023 reg->off == 0 && 2024 tnum_equals_const(reg->var_off, 0); 2025 } 2026 2027 /* Reset the min/max bounds of a register */ 2028 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 2029 { 2030 reg->smin_value = S64_MIN; 2031 reg->smax_value = S64_MAX; 2032 reg->umin_value = 0; 2033 reg->umax_value = U64_MAX; 2034 2035 reg->s32_min_value = S32_MIN; 2036 reg->s32_max_value = S32_MAX; 2037 reg->u32_min_value = 0; 2038 reg->u32_max_value = U32_MAX; 2039 } 2040 2041 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 2042 { 2043 reg->smin_value = S64_MIN; 2044 reg->smax_value = S64_MAX; 2045 reg->umin_value = 0; 2046 reg->umax_value = U64_MAX; 2047 } 2048 2049 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 2050 { 2051 reg->s32_min_value = S32_MIN; 2052 reg->s32_max_value = S32_MAX; 2053 reg->u32_min_value = 0; 2054 reg->u32_max_value = U32_MAX; 2055 } 2056 2057 static void __update_reg32_bounds(struct bpf_reg_state *reg) 2058 { 2059 struct tnum var32_off = tnum_subreg(reg->var_off); 2060 2061 /* min signed is max(sign bit) | min(other bits) */ 2062 reg->s32_min_value = max_t(s32, reg->s32_min_value, 2063 var32_off.value | (var32_off.mask & S32_MIN)); 2064 /* max signed is min(sign bit) | max(other bits) */ 2065 reg->s32_max_value = min_t(s32, reg->s32_max_value, 2066 var32_off.value | (var32_off.mask & S32_MAX)); 2067 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 2068 reg->u32_max_value = min(reg->u32_max_value, 2069 (u32)(var32_off.value | var32_off.mask)); 2070 } 2071 2072 static void __update_reg64_bounds(struct bpf_reg_state *reg) 2073 { 2074 /* min signed is max(sign bit) | min(other bits) */ 2075 reg->smin_value = max_t(s64, reg->smin_value, 2076 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 2077 /* max signed is min(sign bit) | 
max(other bits) */ 2078 reg->smax_value = min_t(s64, reg->smax_value, 2079 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 2080 reg->umin_value = max(reg->umin_value, reg->var_off.value); 2081 reg->umax_value = min(reg->umax_value, 2082 reg->var_off.value | reg->var_off.mask); 2083 } 2084 2085 static void __update_reg_bounds(struct bpf_reg_state *reg) 2086 { 2087 __update_reg32_bounds(reg); 2088 __update_reg64_bounds(reg); 2089 } 2090 2091 /* Uses signed min/max values to inform unsigned, and vice-versa */ 2092 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 2093 { 2094 /* Learn sign from signed bounds. 2095 * If we cannot cross the sign boundary, then signed and unsigned bounds 2096 * are the same, so combine. This works even in the negative case, e.g. 2097 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 2098 */ 2099 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { 2100 reg->s32_min_value = reg->u32_min_value = 2101 max_t(u32, reg->s32_min_value, reg->u32_min_value); 2102 reg->s32_max_value = reg->u32_max_value = 2103 min_t(u32, reg->s32_max_value, reg->u32_max_value); 2104 return; 2105 } 2106 /* Learn sign from unsigned bounds. Signed bounds cross the sign 2107 * boundary, so we must be careful. 2108 */ 2109 if ((s32)reg->u32_max_value >= 0) { 2110 /* Positive. We can't learn anything from the smin, but smax 2111 * is positive, hence safe. 2112 */ 2113 reg->s32_min_value = reg->u32_min_value; 2114 reg->s32_max_value = reg->u32_max_value = 2115 min_t(u32, reg->s32_max_value, reg->u32_max_value); 2116 } else if ((s32)reg->u32_min_value < 0) { 2117 /* Negative. We can't learn anything from the smax, but smin 2118 * is negative, hence safe. 2119 */ 2120 reg->s32_min_value = reg->u32_min_value = 2121 max_t(u32, reg->s32_min_value, reg->u32_min_value); 2122 reg->s32_max_value = reg->u32_max_value; 2123 } 2124 } 2125 2126 static void __reg64_deduce_bounds(struct bpf_reg_state *reg) 2127 { 2128 /* Learn sign from signed bounds. 2129 * If we cannot cross the sign boundary, then signed and unsigned bounds 2130 * are the same, so combine. This works even in the negative case, e.g. 2131 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 2132 */ 2133 if (reg->smin_value >= 0 || reg->smax_value < 0) { 2134 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 2135 reg->umin_value); 2136 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 2137 reg->umax_value); 2138 return; 2139 } 2140 /* Learn sign from unsigned bounds. Signed bounds cross the sign 2141 * boundary, so we must be careful. 2142 */ 2143 if ((s64)reg->umax_value >= 0) { 2144 /* Positive. We can't learn anything from the smin, but smax 2145 * is positive, hence safe. 2146 */ 2147 reg->smin_value = reg->umin_value; 2148 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 2149 reg->umax_value); 2150 } else if ((s64)reg->umin_value < 0) { 2151 /* Negative. We can't learn anything from the smax, but smin 2152 * is negative, hence safe. 
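 * (since the branch above was not taken, umax also has its sign bit
 *  set, so both unsigned bounds are valid negative signed bounds.)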
2153 */ 2154 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 2155 reg->umin_value); 2156 reg->smax_value = reg->umax_value; 2157 } 2158 } 2159 2160 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 2161 { 2162 __reg32_deduce_bounds(reg); 2163 __reg64_deduce_bounds(reg); 2164 } 2165 2166 /* Attempts to improve var_off based on unsigned min/max information */ 2167 static void __reg_bound_offset(struct bpf_reg_state *reg) 2168 { 2169 struct tnum var64_off = tnum_intersect(reg->var_off, 2170 tnum_range(reg->umin_value, 2171 reg->umax_value)); 2172 struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off), 2173 tnum_range(reg->u32_min_value, 2174 reg->u32_max_value)); 2175 2176 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); 2177 } 2178 2179 static void reg_bounds_sync(struct bpf_reg_state *reg) 2180 { 2181 /* We might have learned new bounds from the var_off. */ 2182 __update_reg_bounds(reg); 2183 /* We might have learned something about the sign bit. */ 2184 __reg_deduce_bounds(reg); 2185 /* We might have learned some bits from the bounds. */ 2186 __reg_bound_offset(reg); 2187 /* Intersecting with the old var_off might have improved our bounds 2188 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2189 * then new var_off is (0; 0x7f...fc) which improves our umax. 2190 */ 2191 __update_reg_bounds(reg); 2192 } 2193 2194 static bool __reg32_bound_s64(s32 a) 2195 { 2196 return a >= 0 && a <= S32_MAX; 2197 } 2198 2199 static void __reg_assign_32_into_64(struct bpf_reg_state *reg) 2200 { 2201 reg->umin_value = reg->u32_min_value; 2202 reg->umax_value = reg->u32_max_value; 2203 2204 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must 2205 * be positive otherwise set to worse case bounds and refine later 2206 * from tnum. 2207 */ 2208 if (__reg32_bound_s64(reg->s32_min_value) && 2209 __reg32_bound_s64(reg->s32_max_value)) { 2210 reg->smin_value = reg->s32_min_value; 2211 reg->smax_value = reg->s32_max_value; 2212 } else { 2213 reg->smin_value = 0; 2214 reg->smax_value = U32_MAX; 2215 } 2216 } 2217 2218 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) 2219 { 2220 /* special case when 64-bit register has upper 32-bit register 2221 * zeroed. Typically happens after zext or <<32, >>32 sequence 2222 * allowing us to use 32-bit bounds directly, 2223 */ 2224 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) { 2225 __reg_assign_32_into_64(reg); 2226 } else { 2227 /* Otherwise the best we can do is push lower 32bit known and 2228 * unknown bits into register (var_off set from jmp logic) 2229 * then learn as much as possible from the 64-bit tnum 2230 * known and unknown bits. The previous smin/smax bounds are 2231 * invalid here because of jmp32 compare so mark them unknown 2232 * so they do not impact tnum bounds calculation. 
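 * (reg_bounds_sync() below then re-derives whatever bounds it can from
 *  the remaining var_off information.)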
2233 */ 2234 __mark_reg64_unbounded(reg); 2235 } 2236 reg_bounds_sync(reg); 2237 } 2238 2239 static bool __reg64_bound_s32(s64 a) 2240 { 2241 return a >= S32_MIN && a <= S32_MAX; 2242 } 2243 2244 static bool __reg64_bound_u32(u64 a) 2245 { 2246 return a >= U32_MIN && a <= U32_MAX; 2247 } 2248 2249 static void __reg_combine_64_into_32(struct bpf_reg_state *reg) 2250 { 2251 __mark_reg32_unbounded(reg); 2252 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { 2253 reg->s32_min_value = (s32)reg->smin_value; 2254 reg->s32_max_value = (s32)reg->smax_value; 2255 } 2256 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) { 2257 reg->u32_min_value = (u32)reg->umin_value; 2258 reg->u32_max_value = (u32)reg->umax_value; 2259 } 2260 reg_bounds_sync(reg); 2261 } 2262 2263 /* Mark a register as having a completely unknown (scalar) value. */ 2264 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 2265 struct bpf_reg_state *reg) 2266 { 2267 /* 2268 * Clear type, off, and union(map_ptr, range) and 2269 * padding between 'type' and union 2270 */ 2271 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 2272 reg->type = SCALAR_VALUE; 2273 reg->id = 0; 2274 reg->ref_obj_id = 0; 2275 reg->var_off = tnum_unknown; 2276 reg->frameno = 0; 2277 reg->precise = !env->bpf_capable; 2278 __mark_reg_unbounded(reg); 2279 } 2280 2281 static void mark_reg_unknown(struct bpf_verifier_env *env, 2282 struct bpf_reg_state *regs, u32 regno) 2283 { 2284 if (WARN_ON(regno >= MAX_BPF_REG)) { 2285 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 2286 /* Something bad happened, let's kill all regs except FP */ 2287 for (regno = 0; regno < BPF_REG_FP; regno++) 2288 __mark_reg_not_init(env, regs + regno); 2289 return; 2290 } 2291 __mark_reg_unknown(env, regs + regno); 2292 } 2293 2294 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 2295 struct bpf_reg_state *reg) 2296 { 2297 __mark_reg_unknown(env, reg); 2298 reg->type = NOT_INIT; 2299 } 2300 2301 static void mark_reg_not_init(struct bpf_verifier_env *env, 2302 struct bpf_reg_state *regs, u32 regno) 2303 { 2304 if (WARN_ON(regno >= MAX_BPF_REG)) { 2305 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 2306 /* Something bad happened, let's kill all regs except FP */ 2307 for (regno = 0; regno < BPF_REG_FP; regno++) 2308 __mark_reg_not_init(env, regs + regno); 2309 return; 2310 } 2311 __mark_reg_not_init(env, regs + regno); 2312 } 2313 2314 static void mark_btf_ld_reg(struct bpf_verifier_env *env, 2315 struct bpf_reg_state *regs, u32 regno, 2316 enum bpf_reg_type reg_type, 2317 struct btf *btf, u32 btf_id, 2318 enum bpf_type_flag flag) 2319 { 2320 if (reg_type == SCALAR_VALUE) { 2321 mark_reg_unknown(env, regs, regno); 2322 return; 2323 } 2324 mark_reg_known_zero(env, regs, regno); 2325 regs[regno].type = PTR_TO_BTF_ID | flag; 2326 regs[regno].btf = btf; 2327 regs[regno].btf_id = btf_id; 2328 } 2329 2330 #define DEF_NOT_SUBREG (0) 2331 static void init_reg_state(struct bpf_verifier_env *env, 2332 struct bpf_func_state *state) 2333 { 2334 struct bpf_reg_state *regs = state->regs; 2335 int i; 2336 2337 for (i = 0; i < MAX_BPF_REG; i++) { 2338 mark_reg_not_init(env, regs, i); 2339 regs[i].live = REG_LIVE_NONE; 2340 regs[i].parent = NULL; 2341 regs[i].subreg_def = DEF_NOT_SUBREG; 2342 } 2343 2344 /* frame pointer */ 2345 regs[BPF_REG_FP].type = PTR_TO_STACK; 2346 mark_reg_known_zero(env, regs, BPF_REG_FP); 2347 regs[BPF_REG_FP].frameno = state->frameno; 2348 } 2349 2350 #define BPF_MAIN_FUNC 
(-1) 2351 static void init_func_state(struct bpf_verifier_env *env, 2352 struct bpf_func_state *state, 2353 int callsite, int frameno, int subprogno) 2354 { 2355 state->callsite = callsite; 2356 state->frameno = frameno; 2357 state->subprogno = subprogno; 2358 state->callback_ret_range = tnum_range(0, 0); 2359 init_reg_state(env, state); 2360 mark_verifier_state_scratched(env); 2361 } 2362 2363 /* Similar to push_stack(), but for async callbacks */ 2364 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, 2365 int insn_idx, int prev_insn_idx, 2366 int subprog) 2367 { 2368 struct bpf_verifier_stack_elem *elem; 2369 struct bpf_func_state *frame; 2370 2371 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 2372 if (!elem) 2373 goto err; 2374 2375 elem->insn_idx = insn_idx; 2376 elem->prev_insn_idx = prev_insn_idx; 2377 elem->next = env->head; 2378 elem->log_pos = env->log.end_pos; 2379 env->head = elem; 2380 env->stack_size++; 2381 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 2382 verbose(env, 2383 "The sequence of %d jumps is too complex for async cb.\n", 2384 env->stack_size); 2385 goto err; 2386 } 2387 /* Unlike push_stack() do not copy_verifier_state(). 2388 * The caller state doesn't matter. 2389 * This is async callback. It starts in a fresh stack. 2390 * Initialize it similar to do_check_common(). 2391 */ 2392 elem->st.branches = 1; 2393 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 2394 if (!frame) 2395 goto err; 2396 init_func_state(env, frame, 2397 BPF_MAIN_FUNC /* callsite */, 2398 0 /* frameno within this callchain */, 2399 subprog /* subprog number within this prog */); 2400 elem->st.frame[0] = frame; 2401 return &elem->st; 2402 err: 2403 free_verifier_state(env->cur_state, true); 2404 env->cur_state = NULL; 2405 /* pop all elements and return */ 2406 while (!pop_stack(env, NULL, NULL, false)); 2407 return NULL; 2408 } 2409 2410 2411 enum reg_arg_type { 2412 SRC_OP, /* register is used as source operand */ 2413 DST_OP, /* register is used as destination operand */ 2414 DST_OP_NO_MARK /* same as above, check only, don't mark */ 2415 }; 2416 2417 static int cmp_subprogs(const void *a, const void *b) 2418 { 2419 return ((struct bpf_subprog_info *)a)->start - 2420 ((struct bpf_subprog_info *)b)->start; 2421 } 2422 2423 static int find_subprog(struct bpf_verifier_env *env, int off) 2424 { 2425 struct bpf_subprog_info *p; 2426 2427 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 2428 sizeof(env->subprog_info[0]), cmp_subprogs); 2429 if (!p) 2430 return -ENOENT; 2431 return p - env->subprog_info; 2432 2433 } 2434 2435 static int add_subprog(struct bpf_verifier_env *env, int off) 2436 { 2437 int insn_cnt = env->prog->len; 2438 int ret; 2439 2440 if (off >= insn_cnt || off < 0) { 2441 verbose(env, "call to invalid destination\n"); 2442 return -EINVAL; 2443 } 2444 ret = find_subprog(env, off); 2445 if (ret >= 0) 2446 return ret; 2447 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 2448 verbose(env, "too many subprograms\n"); 2449 return -E2BIG; 2450 } 2451 /* determine subprog starts. 
The end is one before the next starts */ 2452 env->subprog_info[env->subprog_cnt++].start = off; 2453 sort(env->subprog_info, env->subprog_cnt, 2454 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 2455 return env->subprog_cnt - 1; 2456 } 2457 2458 #define MAX_KFUNC_DESCS 256 2459 #define MAX_KFUNC_BTFS 256 2460 2461 struct bpf_kfunc_desc { 2462 struct btf_func_model func_model; 2463 u32 func_id; 2464 s32 imm; 2465 u16 offset; 2466 unsigned long addr; 2467 }; 2468 2469 struct bpf_kfunc_btf { 2470 struct btf *btf; 2471 struct module *module; 2472 u16 offset; 2473 }; 2474 2475 struct bpf_kfunc_desc_tab { 2476 /* Sorted by func_id (BTF ID) and offset (fd_array offset) during 2477 * verification. JITs do lookups by bpf_insn, where func_id may not be 2478 * available, therefore at the end of verification do_misc_fixups() 2479 * sorts this by imm and offset. 2480 */ 2481 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; 2482 u32 nr_descs; 2483 }; 2484 2485 struct bpf_kfunc_btf_tab { 2486 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; 2487 u32 nr_descs; 2488 }; 2489 2490 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) 2491 { 2492 const struct bpf_kfunc_desc *d0 = a; 2493 const struct bpf_kfunc_desc *d1 = b; 2494 2495 /* func_id is not greater than BTF_MAX_TYPE */ 2496 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; 2497 } 2498 2499 static int kfunc_btf_cmp_by_off(const void *a, const void *b) 2500 { 2501 const struct bpf_kfunc_btf *d0 = a; 2502 const struct bpf_kfunc_btf *d1 = b; 2503 2504 return d0->offset - d1->offset; 2505 } 2506 2507 static const struct bpf_kfunc_desc * 2508 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) 2509 { 2510 struct bpf_kfunc_desc desc = { 2511 .func_id = func_id, 2512 .offset = offset, 2513 }; 2514 struct bpf_kfunc_desc_tab *tab; 2515 2516 tab = prog->aux->kfunc_tab; 2517 return bsearch(&desc, tab->descs, tab->nr_descs, 2518 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); 2519 } 2520 2521 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, 2522 u16 btf_fd_idx, u8 **func_addr) 2523 { 2524 const struct bpf_kfunc_desc *desc; 2525 2526 desc = find_kfunc_desc(prog, func_id, btf_fd_idx); 2527 if (!desc) 2528 return -EFAULT; 2529 2530 *func_addr = (u8 *)desc->addr; 2531 return 0; 2532 } 2533 2534 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, 2535 s16 offset) 2536 { 2537 struct bpf_kfunc_btf kf_btf = { .offset = offset }; 2538 struct bpf_kfunc_btf_tab *tab; 2539 struct bpf_kfunc_btf *b; 2540 struct module *mod; 2541 struct btf *btf; 2542 int btf_fd; 2543 2544 tab = env->prog->aux->kfunc_btf_tab; 2545 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, 2546 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); 2547 if (!b) { 2548 if (tab->nr_descs == MAX_KFUNC_BTFS) { 2549 verbose(env, "too many different module BTFs\n"); 2550 return ERR_PTR(-E2BIG); 2551 } 2552 2553 if (bpfptr_is_null(env->fd_array)) { 2554 verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); 2555 return ERR_PTR(-EPROTO); 2556 } 2557 2558 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, 2559 offset * sizeof(btf_fd), 2560 sizeof(btf_fd))) 2561 return ERR_PTR(-EFAULT); 2562 2563 btf = btf_get_by_fd(btf_fd); 2564 if (IS_ERR(btf)) { 2565 verbose(env, "invalid module BTF fd specified\n"); 2566 return btf; 2567 } 2568 2569 if (!btf_is_module(btf)) { 2570 verbose(env, "BTF fd for kfunc is not a module BTF\n"); 2571 btf_put(btf); 2572 return ERR_PTR(-EINVAL); 2573 } 2574 2575 mod = btf_try_get_module(btf); 2576 if (!mod) { 2577 
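/* couldn't take a reference on the owning module; drop our BTF ref */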
btf_put(btf); 2578 return ERR_PTR(-ENXIO); 2579 } 2580 2581 b = &tab->descs[tab->nr_descs++]; 2582 b->btf = btf; 2583 b->module = mod; 2584 b->offset = offset; 2585 2586 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2587 kfunc_btf_cmp_by_off, NULL); 2588 } 2589 return b->btf; 2590 } 2591 2592 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) 2593 { 2594 if (!tab) 2595 return; 2596 2597 while (tab->nr_descs--) { 2598 module_put(tab->descs[tab->nr_descs].module); 2599 btf_put(tab->descs[tab->nr_descs].btf); 2600 } 2601 kfree(tab); 2602 } 2603 2604 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) 2605 { 2606 if (offset) { 2607 if (offset < 0) { 2608 /* In the future, this can be allowed to increase limit 2609 * of fd index into fd_array, interpreted as u16. 2610 */ 2611 verbose(env, "negative offset disallowed for kernel module function call\n"); 2612 return ERR_PTR(-EINVAL); 2613 } 2614 2615 return __find_kfunc_desc_btf(env, offset); 2616 } 2617 return btf_vmlinux ?: ERR_PTR(-ENOENT); 2618 } 2619 2620 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) 2621 { 2622 const struct btf_type *func, *func_proto; 2623 struct bpf_kfunc_btf_tab *btf_tab; 2624 struct bpf_kfunc_desc_tab *tab; 2625 struct bpf_prog_aux *prog_aux; 2626 struct bpf_kfunc_desc *desc; 2627 const char *func_name; 2628 struct btf *desc_btf; 2629 unsigned long call_imm; 2630 unsigned long addr; 2631 int err; 2632 2633 prog_aux = env->prog->aux; 2634 tab = prog_aux->kfunc_tab; 2635 btf_tab = prog_aux->kfunc_btf_tab; 2636 if (!tab) { 2637 if (!btf_vmlinux) { 2638 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); 2639 return -ENOTSUPP; 2640 } 2641 2642 if (!env->prog->jit_requested) { 2643 verbose(env, "JIT is required for calling kernel function\n"); 2644 return -ENOTSUPP; 2645 } 2646 2647 if (!bpf_jit_supports_kfunc_call()) { 2648 verbose(env, "JIT does not support calling kernel function\n"); 2649 return -ENOTSUPP; 2650 } 2651 2652 if (!env->prog->gpl_compatible) { 2653 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); 2654 return -EINVAL; 2655 } 2656 2657 tab = kzalloc(sizeof(*tab), GFP_KERNEL); 2658 if (!tab) 2659 return -ENOMEM; 2660 prog_aux->kfunc_tab = tab; 2661 } 2662 2663 /* func_id == 0 is always invalid, but instead of returning an error, be 2664 * conservative and wait until the code elimination pass before returning 2665 * error, so that invalid calls that get pruned out can be in BPF programs 2666 * loaded from userspace. It is also required that offset be untouched 2667 * for such calls. 
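 * (hence the combined '!func_id && !offset' check right below.)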
2668 */ 2669 if (!func_id && !offset) 2670 return 0; 2671 2672 if (!btf_tab && offset) { 2673 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); 2674 if (!btf_tab) 2675 return -ENOMEM; 2676 prog_aux->kfunc_btf_tab = btf_tab; 2677 } 2678 2679 desc_btf = find_kfunc_desc_btf(env, offset); 2680 if (IS_ERR(desc_btf)) { 2681 verbose(env, "failed to find BTF for kernel function\n"); 2682 return PTR_ERR(desc_btf); 2683 } 2684 2685 if (find_kfunc_desc(env->prog, func_id, offset)) 2686 return 0; 2687 2688 if (tab->nr_descs == MAX_KFUNC_DESCS) { 2689 verbose(env, "too many different kernel function calls\n"); 2690 return -E2BIG; 2691 } 2692 2693 func = btf_type_by_id(desc_btf, func_id); 2694 if (!func || !btf_type_is_func(func)) { 2695 verbose(env, "kernel btf_id %u is not a function\n", 2696 func_id); 2697 return -EINVAL; 2698 } 2699 func_proto = btf_type_by_id(desc_btf, func->type); 2700 if (!func_proto || !btf_type_is_func_proto(func_proto)) { 2701 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", 2702 func_id); 2703 return -EINVAL; 2704 } 2705 2706 func_name = btf_name_by_offset(desc_btf, func->name_off); 2707 addr = kallsyms_lookup_name(func_name); 2708 if (!addr) { 2709 verbose(env, "cannot find address for kernel function %s\n", 2710 func_name); 2711 return -EINVAL; 2712 } 2713 specialize_kfunc(env, func_id, offset, &addr); 2714 2715 if (bpf_jit_supports_far_kfunc_call()) { 2716 call_imm = func_id; 2717 } else { 2718 call_imm = BPF_CALL_IMM(addr); 2719 /* Check whether the relative offset overflows desc->imm */ 2720 if ((unsigned long)(s32)call_imm != call_imm) { 2721 verbose(env, "address of kernel function %s is out of range\n", 2722 func_name); 2723 return -EINVAL; 2724 } 2725 } 2726 2727 if (bpf_dev_bound_kfunc_id(func_id)) { 2728 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); 2729 if (err) 2730 return err; 2731 } 2732 2733 desc = &tab->descs[tab->nr_descs++]; 2734 desc->func_id = func_id; 2735 desc->imm = call_imm; 2736 desc->offset = offset; 2737 desc->addr = addr; 2738 err = btf_distill_func_proto(&env->log, desc_btf, 2739 func_proto, func_name, 2740 &desc->func_model); 2741 if (!err) 2742 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2743 kfunc_desc_cmp_by_id_off, NULL); 2744 return err; 2745 } 2746 2747 static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b) 2748 { 2749 const struct bpf_kfunc_desc *d0 = a; 2750 const struct bpf_kfunc_desc *d1 = b; 2751 2752 if (d0->imm != d1->imm) 2753 return d0->imm < d1->imm ? -1 : 1; 2754 if (d0->offset != d1->offset) 2755 return d0->offset < d1->offset ? -1 : 1; 2756 return 0; 2757 } 2758 2759 static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog) 2760 { 2761 struct bpf_kfunc_desc_tab *tab; 2762 2763 tab = prog->aux->kfunc_tab; 2764 if (!tab) 2765 return; 2766 2767 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2768 kfunc_desc_cmp_by_imm_off, NULL); 2769 } 2770 2771 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 2772 { 2773 return !!prog->aux->kfunc_tab; 2774 } 2775 2776 const struct btf_func_model * 2777 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2778 const struct bpf_insn *insn) 2779 { 2780 const struct bpf_kfunc_desc desc = { 2781 .imm = insn->imm, 2782 .offset = insn->off, 2783 }; 2784 const struct bpf_kfunc_desc *res; 2785 struct bpf_kfunc_desc_tab *tab; 2786 2787 tab = prog->aux->kfunc_tab; 2788 res = bsearch(&desc, tab->descs, tab->nr_descs, 2789 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); 2790 2791 return res ? 
&res->func_model : NULL; 2792 } 2793 2794 static int add_subprog_and_kfunc(struct bpf_verifier_env *env) 2795 { 2796 struct bpf_subprog_info *subprog = env->subprog_info; 2797 struct bpf_insn *insn = env->prog->insnsi; 2798 int i, ret, insn_cnt = env->prog->len; 2799 2800 /* Add entry function. */ 2801 ret = add_subprog(env, 0); 2802 if (ret) 2803 return ret; 2804 2805 for (i = 0; i < insn_cnt; i++, insn++) { 2806 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && 2807 !bpf_pseudo_kfunc_call(insn)) 2808 continue; 2809 2810 if (!env->bpf_capable) { 2811 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); 2812 return -EPERM; 2813 } 2814 2815 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) 2816 ret = add_subprog(env, i + insn->imm + 1); 2817 else 2818 ret = add_kfunc_call(env, insn->imm, insn->off); 2819 2820 if (ret < 0) 2821 return ret; 2822 } 2823 2824 /* Add a fake 'exit' subprog which could simplify subprog iteration 2825 * logic. 'subprog_cnt' should not be increased. 2826 */ 2827 subprog[env->subprog_cnt].start = insn_cnt; 2828 2829 if (env->log.level & BPF_LOG_LEVEL2) 2830 for (i = 0; i < env->subprog_cnt; i++) 2831 verbose(env, "func#%d @%d\n", i, subprog[i].start); 2832 2833 return 0; 2834 } 2835 2836 static int check_subprogs(struct bpf_verifier_env *env) 2837 { 2838 int i, subprog_start, subprog_end, off, cur_subprog = 0; 2839 struct bpf_subprog_info *subprog = env->subprog_info; 2840 struct bpf_insn *insn = env->prog->insnsi; 2841 int insn_cnt = env->prog->len; 2842 2843 /* now check that all jumps are within the same subprog */ 2844 subprog_start = subprog[cur_subprog].start; 2845 subprog_end = subprog[cur_subprog + 1].start; 2846 for (i = 0; i < insn_cnt; i++) { 2847 u8 code = insn[i].code; 2848 2849 if (code == (BPF_JMP | BPF_CALL) && 2850 insn[i].src_reg == 0 && 2851 insn[i].imm == BPF_FUNC_tail_call) 2852 subprog[cur_subprog].has_tail_call = true; 2853 if (BPF_CLASS(code) == BPF_LD && 2854 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) 2855 subprog[cur_subprog].has_ld_abs = true; 2856 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 2857 goto next; 2858 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 2859 goto next; 2860 if (code == (BPF_JMP32 | BPF_JA)) 2861 off = i + insn[i].imm + 1; 2862 else 2863 off = i + insn[i].off + 1; 2864 if (off < subprog_start || off >= subprog_end) { 2865 verbose(env, "jump out of range from insn %d to %d\n", i, off); 2866 return -EINVAL; 2867 } 2868 next: 2869 if (i == subprog_end - 1) { 2870 /* to avoid fall-through from one subprog into another 2871 * the last insn of the subprog should be either exit 2872 * or unconditional jump back 2873 */ 2874 if (code != (BPF_JMP | BPF_EXIT) && 2875 code != (BPF_JMP32 | BPF_JA) && 2876 code != (BPF_JMP | BPF_JA)) { 2877 verbose(env, "last insn is not an exit or jmp\n"); 2878 return -EINVAL; 2879 } 2880 subprog_start = subprog_end; 2881 cur_subprog++; 2882 if (cur_subprog < env->subprog_cnt) 2883 subprog_end = subprog[cur_subprog + 1].start; 2884 } 2885 } 2886 return 0; 2887 } 2888 2889 /* Parentage chain of this register (or stack slot) should take care of all 2890 * issues like callee-saved registers, stack slot allocation time, etc. 
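 * mark_reg_read() below walks that chain towards the root, ORing the
 * requested REG_LIVE_READ32/READ64 flag into each parent until it hits
 * a state where the value was written (screening the read) or a parent
 * that already carries an equal or stronger read mark.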
2891 */ 2892 static int mark_reg_read(struct bpf_verifier_env *env, 2893 const struct bpf_reg_state *state, 2894 struct bpf_reg_state *parent, u8 flag) 2895 { 2896 bool writes = parent == state->parent; /* Observe write marks */ 2897 int cnt = 0; 2898 2899 while (parent) { 2900 /* if read wasn't screened by an earlier write ... */ 2901 if (writes && state->live & REG_LIVE_WRITTEN) 2902 break; 2903 if (parent->live & REG_LIVE_DONE) { 2904 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 2905 reg_type_str(env, parent->type), 2906 parent->var_off.value, parent->off); 2907 return -EFAULT; 2908 } 2909 /* The first condition is more likely to be true than the 2910 * second, checked it first. 2911 */ 2912 if ((parent->live & REG_LIVE_READ) == flag || 2913 parent->live & REG_LIVE_READ64) 2914 /* The parentage chain never changes and 2915 * this parent was already marked as LIVE_READ. 2916 * There is no need to keep walking the chain again and 2917 * keep re-marking all parents as LIVE_READ. 2918 * This case happens when the same register is read 2919 * multiple times without writes into it in-between. 2920 * Also, if parent has the stronger REG_LIVE_READ64 set, 2921 * then no need to set the weak REG_LIVE_READ32. 2922 */ 2923 break; 2924 /* ... then we depend on parent's value */ 2925 parent->live |= flag; 2926 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 2927 if (flag == REG_LIVE_READ64) 2928 parent->live &= ~REG_LIVE_READ32; 2929 state = parent; 2930 parent = state->parent; 2931 writes = true; 2932 cnt++; 2933 } 2934 2935 if (env->longest_mark_read_walk < cnt) 2936 env->longest_mark_read_walk = cnt; 2937 return 0; 2938 } 2939 2940 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 2941 { 2942 struct bpf_func_state *state = func(env, reg); 2943 int spi, ret; 2944 2945 /* For CONST_PTR_TO_DYNPTR, it must have already been done by 2946 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in 2947 * check_kfunc_call. 2948 */ 2949 if (reg->type == CONST_PTR_TO_DYNPTR) 2950 return 0; 2951 spi = dynptr_get_spi(env, reg); 2952 if (spi < 0) 2953 return spi; 2954 /* Caller ensures dynptr is valid and initialized, which means spi is in 2955 * bounds and spi is the first dynptr slot. Simply mark stack slot as 2956 * read. 2957 */ 2958 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, 2959 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); 2960 if (ret) 2961 return ret; 2962 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, 2963 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64); 2964 } 2965 2966 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 2967 int spi, int nr_slots) 2968 { 2969 struct bpf_func_state *state = func(env, reg); 2970 int err, i; 2971 2972 for (i = 0; i < nr_slots; i++) { 2973 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; 2974 2975 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); 2976 if (err) 2977 return err; 2978 2979 mark_stack_slot_scratched(env, spi - i); 2980 } 2981 2982 return 0; 2983 } 2984 2985 /* This function is supposed to be used by the following 32-bit optimization 2986 * code only. It returns TRUE if the source or destination register operates 2987 * on 64-bit, otherwise return FALSE. 
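 * For example, a BPF_ALU64 BPF_ADD operates on the full 64 bits, while
 * a BPF_ALU (32-bit class) BPF_MOV is a sub-register definition, so it
 * is not treated as a 64-bit access.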
2988 */ 2989 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 2990 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 2991 { 2992 u8 code, class, op; 2993 2994 code = insn->code; 2995 class = BPF_CLASS(code); 2996 op = BPF_OP(code); 2997 if (class == BPF_JMP) { 2998 /* BPF_EXIT for "main" will reach here. Return TRUE 2999 * conservatively. 3000 */ 3001 if (op == BPF_EXIT) 3002 return true; 3003 if (op == BPF_CALL) { 3004 /* BPF to BPF call will reach here because of marking 3005 * caller saved clobber with DST_OP_NO_MARK for which we 3006 * don't care the register def because they are anyway 3007 * marked as NOT_INIT already. 3008 */ 3009 if (insn->src_reg == BPF_PSEUDO_CALL) 3010 return false; 3011 /* Helper call will reach here because of arg type 3012 * check, conservatively return TRUE. 3013 */ 3014 if (t == SRC_OP) 3015 return true; 3016 3017 return false; 3018 } 3019 } 3020 3021 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) 3022 return false; 3023 3024 if (class == BPF_ALU64 || class == BPF_JMP || 3025 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 3026 return true; 3027 3028 if (class == BPF_ALU || class == BPF_JMP32) 3029 return false; 3030 3031 if (class == BPF_LDX) { 3032 if (t != SRC_OP) 3033 return BPF_SIZE(code) == BPF_DW; 3034 /* LDX source must be ptr. */ 3035 return true; 3036 } 3037 3038 if (class == BPF_STX) { 3039 /* BPF_STX (including atomic variants) has multiple source 3040 * operands, one of which is a ptr. Check whether the caller is 3041 * asking about it. 3042 */ 3043 if (t == SRC_OP && reg->type != SCALAR_VALUE) 3044 return true; 3045 return BPF_SIZE(code) == BPF_DW; 3046 } 3047 3048 if (class == BPF_LD) { 3049 u8 mode = BPF_MODE(code); 3050 3051 /* LD_IMM64 */ 3052 if (mode == BPF_IMM) 3053 return true; 3054 3055 /* Both LD_IND and LD_ABS return 32-bit data. */ 3056 if (t != SRC_OP) 3057 return false; 3058 3059 /* Implicit ctx ptr. */ 3060 if (regno == BPF_REG_6) 3061 return true; 3062 3063 /* Explicit source could be any width. */ 3064 return true; 3065 } 3066 3067 if (class == BPF_ST) 3068 /* The only source register for BPF_ST is a ptr. */ 3069 return true; 3070 3071 /* Conservatively return true at default. */ 3072 return true; 3073 } 3074 3075 /* Return the regno defined by the insn, or -1. */ 3076 static int insn_def_regno(const struct bpf_insn *insn) 3077 { 3078 switch (BPF_CLASS(insn->code)) { 3079 case BPF_JMP: 3080 case BPF_JMP32: 3081 case BPF_ST: 3082 return -1; 3083 case BPF_STX: 3084 if (BPF_MODE(insn->code) == BPF_ATOMIC && 3085 (insn->imm & BPF_FETCH)) { 3086 if (insn->imm == BPF_CMPXCHG) 3087 return BPF_REG_0; 3088 else 3089 return insn->src_reg; 3090 } else { 3091 return -1; 3092 } 3093 default: 3094 return insn->dst_reg; 3095 } 3096 } 3097 3098 /* Return TRUE if INSN has defined any 32-bit value explicitly. */ 3099 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) 3100 { 3101 int dst_reg = insn_def_regno(insn); 3102 3103 if (dst_reg == -1) 3104 return false; 3105 3106 return !is_reg64(env, insn, dst_reg, NULL, DST_OP); 3107 } 3108 3109 static void mark_insn_zext(struct bpf_verifier_env *env, 3110 struct bpf_reg_state *reg) 3111 { 3112 s32 def_idx = reg->subreg_def; 3113 3114 if (def_idx == DEF_NOT_SUBREG) 3115 return; 3116 3117 env->insn_aux_data[def_idx - 1].zext_dst = true; 3118 /* The dst will be zero extended, so won't be sub-register anymore. 
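 * (Setting zext_dst on the defining insn lets the verifier's later
 *  zero-extension patching know this 32-bit definition is read as
 *  64 bits.)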
*/ 3119 reg->subreg_def = DEF_NOT_SUBREG; 3120 } 3121 3122 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 3123 enum reg_arg_type t) 3124 { 3125 struct bpf_verifier_state *vstate = env->cur_state; 3126 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3127 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; 3128 struct bpf_reg_state *reg, *regs = state->regs; 3129 bool rw64; 3130 3131 if (regno >= MAX_BPF_REG) { 3132 verbose(env, "R%d is invalid\n", regno); 3133 return -EINVAL; 3134 } 3135 3136 mark_reg_scratched(env, regno); 3137 3138 reg = ®s[regno]; 3139 rw64 = is_reg64(env, insn, regno, reg, t); 3140 if (t == SRC_OP) { 3141 /* check whether register used as source operand can be read */ 3142 if (reg->type == NOT_INIT) { 3143 verbose(env, "R%d !read_ok\n", regno); 3144 return -EACCES; 3145 } 3146 /* We don't need to worry about FP liveness because it's read-only */ 3147 if (regno == BPF_REG_FP) 3148 return 0; 3149 3150 if (rw64) 3151 mark_insn_zext(env, reg); 3152 3153 return mark_reg_read(env, reg, reg->parent, 3154 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); 3155 } else { 3156 /* check whether register used as dest operand can be written to */ 3157 if (regno == BPF_REG_FP) { 3158 verbose(env, "frame pointer is read only\n"); 3159 return -EACCES; 3160 } 3161 reg->live |= REG_LIVE_WRITTEN; 3162 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; 3163 if (t == DST_OP) 3164 mark_reg_unknown(env, regs, regno); 3165 } 3166 return 0; 3167 } 3168 3169 static void mark_jmp_point(struct bpf_verifier_env *env, int idx) 3170 { 3171 env->insn_aux_data[idx].jmp_point = true; 3172 } 3173 3174 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) 3175 { 3176 return env->insn_aux_data[insn_idx].jmp_point; 3177 } 3178 3179 /* for any branch, call, exit record the history of jmps in the given state */ 3180 static int push_jmp_history(struct bpf_verifier_env *env, 3181 struct bpf_verifier_state *cur) 3182 { 3183 u32 cnt = cur->jmp_history_cnt; 3184 struct bpf_idx_pair *p; 3185 size_t alloc_size; 3186 3187 if (!is_jmp_point(env, env->insn_idx)) 3188 return 0; 3189 3190 cnt++; 3191 alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p))); 3192 p = krealloc(cur->jmp_history, alloc_size, GFP_USER); 3193 if (!p) 3194 return -ENOMEM; 3195 p[cnt - 1].idx = env->insn_idx; 3196 p[cnt - 1].prev_idx = env->prev_insn_idx; 3197 cur->jmp_history = p; 3198 cur->jmp_history_cnt = cnt; 3199 return 0; 3200 } 3201 3202 /* Backtrack one insn at a time. If idx is not at the top of recorded 3203 * history then previous instruction came from straight line execution. 
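 * If idx does match the newest history entry, the previous insn is the
 * recorded jump source (prev_idx) and the history count is decremented.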
3204 */ 3205 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, 3206 u32 *history) 3207 { 3208 u32 cnt = *history; 3209 3210 if (cnt && st->jmp_history[cnt - 1].idx == i) { 3211 i = st->jmp_history[cnt - 1].prev_idx; 3212 (*history)--; 3213 } else { 3214 i--; 3215 } 3216 return i; 3217 } 3218 3219 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) 3220 { 3221 const struct btf_type *func; 3222 struct btf *desc_btf; 3223 3224 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) 3225 return NULL; 3226 3227 desc_btf = find_kfunc_desc_btf(data, insn->off); 3228 if (IS_ERR(desc_btf)) 3229 return "<error>"; 3230 3231 func = btf_type_by_id(desc_btf, insn->imm); 3232 return btf_name_by_offset(desc_btf, func->name_off); 3233 } 3234 3235 static inline void bt_init(struct backtrack_state *bt, u32 frame) 3236 { 3237 bt->frame = frame; 3238 } 3239 3240 static inline void bt_reset(struct backtrack_state *bt) 3241 { 3242 struct bpf_verifier_env *env = bt->env; 3243 3244 memset(bt, 0, sizeof(*bt)); 3245 bt->env = env; 3246 } 3247 3248 static inline u32 bt_empty(struct backtrack_state *bt) 3249 { 3250 u64 mask = 0; 3251 int i; 3252 3253 for (i = 0; i <= bt->frame; i++) 3254 mask |= bt->reg_masks[i] | bt->stack_masks[i]; 3255 3256 return mask == 0; 3257 } 3258 3259 static inline int bt_subprog_enter(struct backtrack_state *bt) 3260 { 3261 if (bt->frame == MAX_CALL_FRAMES - 1) { 3262 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); 3263 WARN_ONCE(1, "verifier backtracking bug"); 3264 return -EFAULT; 3265 } 3266 bt->frame++; 3267 return 0; 3268 } 3269 3270 static inline int bt_subprog_exit(struct backtrack_state *bt) 3271 { 3272 if (bt->frame == 0) { 3273 verbose(bt->env, "BUG subprog exit from frame 0\n"); 3274 WARN_ONCE(1, "verifier backtracking bug"); 3275 return -EFAULT; 3276 } 3277 bt->frame--; 3278 return 0; 3279 } 3280 3281 static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) 3282 { 3283 bt->reg_masks[frame] |= 1 << reg; 3284 } 3285 3286 static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) 3287 { 3288 bt->reg_masks[frame] &= ~(1 << reg); 3289 } 3290 3291 static inline void bt_set_reg(struct backtrack_state *bt, u32 reg) 3292 { 3293 bt_set_frame_reg(bt, bt->frame, reg); 3294 } 3295 3296 static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg) 3297 { 3298 bt_clear_frame_reg(bt, bt->frame, reg); 3299 } 3300 3301 static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) 3302 { 3303 bt->stack_masks[frame] |= 1ull << slot; 3304 } 3305 3306 static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) 3307 { 3308 bt->stack_masks[frame] &= ~(1ull << slot); 3309 } 3310 3311 static inline void bt_set_slot(struct backtrack_state *bt, u32 slot) 3312 { 3313 bt_set_frame_slot(bt, bt->frame, slot); 3314 } 3315 3316 static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot) 3317 { 3318 bt_clear_frame_slot(bt, bt->frame, slot); 3319 } 3320 3321 static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame) 3322 { 3323 return bt->reg_masks[frame]; 3324 } 3325 3326 static inline u32 bt_reg_mask(struct backtrack_state *bt) 3327 { 3328 return bt->reg_masks[bt->frame]; 3329 } 3330 3331 static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame) 3332 { 3333 return bt->stack_masks[frame]; 3334 } 3335 3336 static inline u64 bt_stack_mask(struct backtrack_state *bt) 3337 { 3338 return 
bt->stack_masks[bt->frame]; 3339 } 3340 3341 static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) 3342 { 3343 return bt->reg_masks[bt->frame] & (1 << reg); 3344 } 3345 3346 static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot) 3347 { 3348 return bt->stack_masks[bt->frame] & (1ull << slot); 3349 } 3350 3351 /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */ 3352 static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask) 3353 { 3354 DECLARE_BITMAP(mask, 64); 3355 bool first = true; 3356 int i, n; 3357 3358 buf[0] = '\0'; 3359 3360 bitmap_from_u64(mask, reg_mask); 3361 for_each_set_bit(i, mask, 32) { 3362 n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i); 3363 first = false; 3364 buf += n; 3365 buf_sz -= n; 3366 if (buf_sz < 0) 3367 break; 3368 } 3369 } 3370 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */ 3371 static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) 3372 { 3373 DECLARE_BITMAP(mask, 64); 3374 bool first = true; 3375 int i, n; 3376 3377 buf[0] = '\0'; 3378 3379 bitmap_from_u64(mask, stack_mask); 3380 for_each_set_bit(i, mask, 64) { 3381 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); 3382 first = false; 3383 buf += n; 3384 buf_sz -= n; 3385 if (buf_sz < 0) 3386 break; 3387 } 3388 } 3389 3390 /* For given verifier state backtrack_insn() is called from the last insn to 3391 * the first insn. Its purpose is to compute a bitmask of registers and 3392 * stack slots that needs precision in the parent verifier state. 3393 * 3394 * @idx is an index of the instruction we are currently processing; 3395 * @subseq_idx is an index of the subsequent instruction that: 3396 * - *would be* executed next, if jump history is viewed in forward order; 3397 * - *was* processed previously during backtracking. 3398 */ 3399 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, 3400 struct backtrack_state *bt) 3401 { 3402 const struct bpf_insn_cbs cbs = { 3403 .cb_call = disasm_kfunc_name, 3404 .cb_print = verbose, 3405 .private_data = env, 3406 }; 3407 struct bpf_insn *insn = env->prog->insnsi + idx; 3408 u8 class = BPF_CLASS(insn->code); 3409 u8 opcode = BPF_OP(insn->code); 3410 u8 mode = BPF_MODE(insn->code); 3411 u32 dreg = insn->dst_reg; 3412 u32 sreg = insn->src_reg; 3413 u32 spi, i; 3414 3415 if (insn->code == 0) 3416 return 0; 3417 if (env->log.level & BPF_LOG_LEVEL2) { 3418 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); 3419 verbose(env, "mark_precise: frame%d: regs=%s ", 3420 bt->frame, env->tmp_str_buf); 3421 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); 3422 verbose(env, "stack=%s before ", env->tmp_str_buf); 3423 verbose(env, "%d: ", idx); 3424 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 3425 } 3426 3427 if (class == BPF_ALU || class == BPF_ALU64) { 3428 if (!bt_is_reg_set(bt, dreg)) 3429 return 0; 3430 if (opcode == BPF_MOV) { 3431 if (BPF_SRC(insn->code) == BPF_X) { 3432 /* dreg = sreg or dreg = (s8, s16, s32)sreg 3433 * dreg needs precision after this insn 3434 * sreg needs precision before this insn 3435 */ 3436 bt_clear_reg(bt, dreg); 3437 bt_set_reg(bt, sreg); 3438 } else { 3439 /* dreg = K 3440 * dreg needs precision after this insn. 3441 * Corresponding register is already marked 3442 * as precise=true in this verifier state. 
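 * (a constant load does not depend on any earlier value, so dreg is
 *  simply dropped from the precision mask.)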
3443 * No further markings in parent are necessary 3444 */ 3445 bt_clear_reg(bt, dreg); 3446 } 3447 } else { 3448 if (BPF_SRC(insn->code) == BPF_X) { 3449 /* dreg += sreg 3450 * both dreg and sreg need precision 3451 * before this insn 3452 */ 3453 bt_set_reg(bt, sreg); 3454 } /* else dreg += K 3455 * dreg still needs precision before this insn 3456 */ 3457 } 3458 } else if (class == BPF_LDX) { 3459 if (!bt_is_reg_set(bt, dreg)) 3460 return 0; 3461 bt_clear_reg(bt, dreg); 3462 3463 /* scalars can only be spilled into stack w/o losing precision. 3464 * Load from any other memory can be zero extended. 3465 * The desire to keep that precision is already indicated 3466 * by 'precise' mark in corresponding register of this state. 3467 * No further tracking necessary. 3468 */ 3469 if (insn->src_reg != BPF_REG_FP) 3470 return 0; 3471 3472 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 3473 * that [fp - off] slot contains scalar that needs to be 3474 * tracked with precision 3475 */ 3476 spi = (-insn->off - 1) / BPF_REG_SIZE; 3477 if (spi >= 64) { 3478 verbose(env, "BUG spi %d\n", spi); 3479 WARN_ONCE(1, "verifier backtracking bug"); 3480 return -EFAULT; 3481 } 3482 bt_set_slot(bt, spi); 3483 } else if (class == BPF_STX || class == BPF_ST) { 3484 if (bt_is_reg_set(bt, dreg)) 3485 /* stx & st shouldn't be using _scalar_ dst_reg 3486 * to access memory. It means backtracking 3487 * encountered a case of pointer subtraction. 3488 */ 3489 return -ENOTSUPP; 3490 /* scalars can only be spilled into stack */ 3491 if (insn->dst_reg != BPF_REG_FP) 3492 return 0; 3493 spi = (-insn->off - 1) / BPF_REG_SIZE; 3494 if (spi >= 64) { 3495 verbose(env, "BUG spi %d\n", spi); 3496 WARN_ONCE(1, "verifier backtracking bug"); 3497 return -EFAULT; 3498 } 3499 if (!bt_is_slot_set(bt, spi)) 3500 return 0; 3501 bt_clear_slot(bt, spi); 3502 if (class == BPF_STX) 3503 bt_set_reg(bt, sreg); 3504 } else if (class == BPF_JMP || class == BPF_JMP32) { 3505 if (bpf_pseudo_call(insn)) { 3506 int subprog_insn_idx, subprog; 3507 3508 subprog_insn_idx = idx + insn->imm + 1; 3509 subprog = find_subprog(env, subprog_insn_idx); 3510 if (subprog < 0) 3511 return -EFAULT; 3512 3513 if (subprog_is_global(env, subprog)) { 3514 /* check that jump history doesn't have any 3515 * extra instructions from subprog; the next 3516 * instruction after call to global subprog 3517 * should be literally next instruction in 3518 * caller program 3519 */ 3520 WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug"); 3521 /* r1-r5 are invalidated after subprog call, 3522 * so for global func call it shouldn't be set 3523 * anymore 3524 */ 3525 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { 3526 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3527 WARN_ONCE(1, "verifier backtracking bug"); 3528 return -EFAULT; 3529 } 3530 /* global subprog always sets R0 */ 3531 bt_clear_reg(bt, BPF_REG_0); 3532 return 0; 3533 } else { 3534 /* static subprog call instruction, which 3535 * means that we are exiting current subprog, 3536 * so only r1-r5 could be still requested as 3537 * precise, r0 and r6-r10 or any stack slot in 3538 * the current frame should be zero by now 3539 */ 3540 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { 3541 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3542 WARN_ONCE(1, "verifier backtracking bug"); 3543 return -EFAULT; 3544 } 3545 /* we don't track register spills perfectly, 3546 * so fallback to force-precise instead of failing */ 3547 if (bt_stack_mask(bt) != 0) 3548 return -ENOTSUPP; 3549 /* propagate r1-r5 to the caller */ 3550 
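/* each register still marked in this frame's mask is moved to the
 * caller's frame mask; bt_subprog_exit() below then switches
 * backtracking to the caller's frame
 */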
for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 3551 if (bt_is_reg_set(bt, i)) { 3552 bt_clear_reg(bt, i); 3553 bt_set_frame_reg(bt, bt->frame - 1, i); 3554 } 3555 } 3556 if (bt_subprog_exit(bt)) 3557 return -EFAULT; 3558 return 0; 3559 } 3560 } else if ((bpf_helper_call(insn) && 3561 is_callback_calling_function(insn->imm) && 3562 !is_async_callback_calling_function(insn->imm)) || 3563 (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) { 3564 /* callback-calling helper or kfunc call, which means 3565 * we are exiting from subprog, but unlike the subprog 3566 * call handling above, we shouldn't propagate 3567 * precision of r1-r5 (if any requested), as they are 3568 * not actually arguments passed directly to callback 3569 * subprogs 3570 */ 3571 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { 3572 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3573 WARN_ONCE(1, "verifier backtracking bug"); 3574 return -EFAULT; 3575 } 3576 if (bt_stack_mask(bt) != 0) 3577 return -ENOTSUPP; 3578 /* clear r1-r5 in callback subprog's mask */ 3579 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 3580 bt_clear_reg(bt, i); 3581 if (bt_subprog_exit(bt)) 3582 return -EFAULT; 3583 return 0; 3584 } else if (opcode == BPF_CALL) { 3585 /* kfunc with imm==0 is invalid and fixup_kfunc_call will 3586 * catch this error later. Make backtracking conservative 3587 * with ENOTSUPP. 3588 */ 3589 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) 3590 return -ENOTSUPP; 3591 /* regular helper call sets R0 */ 3592 bt_clear_reg(bt, BPF_REG_0); 3593 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { 3594 /* if backtracing was looking for registers R1-R5 3595 * they should have been found already. 3596 */ 3597 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3598 WARN_ONCE(1, "verifier backtracking bug"); 3599 return -EFAULT; 3600 } 3601 } else if (opcode == BPF_EXIT) { 3602 bool r0_precise; 3603 3604 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { 3605 /* if backtracing was looking for registers R1-R5 3606 * they should have been found already. 3607 */ 3608 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3609 WARN_ONCE(1, "verifier backtracking bug"); 3610 return -EFAULT; 3611 } 3612 3613 /* BPF_EXIT in subprog or callback always returns 3614 * right after the call instruction, so by checking 3615 * whether the instruction at subseq_idx-1 is subprog 3616 * call or not we can distinguish actual exit from 3617 * *subprog* from exit from *callback*. In the former 3618 * case, we need to propagate r0 precision, if 3619 * necessary. In the former we never do that. 3620 */ 3621 r0_precise = subseq_idx - 1 >= 0 && 3622 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && 3623 bt_is_reg_set(bt, BPF_REG_0); 3624 3625 bt_clear_reg(bt, BPF_REG_0); 3626 if (bt_subprog_enter(bt)) 3627 return -EFAULT; 3628 3629 if (r0_precise) 3630 bt_set_reg(bt, BPF_REG_0); 3631 /* r6-r9 and stack slots will stay set in caller frame 3632 * bitmasks until we return back from callee(s) 3633 */ 3634 return 0; 3635 } else if (BPF_SRC(insn->code) == BPF_X) { 3636 if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg)) 3637 return 0; 3638 /* dreg <cond> sreg 3639 * Both dreg and sreg need precision before 3640 * this insn. If only sreg was marked precise 3641 * before it would be equally necessary to 3642 * propagate it to dreg. 3643 */ 3644 bt_set_reg(bt, dreg); 3645 bt_set_reg(bt, sreg); 3646 /* else dreg <cond> K 3647 * Only dreg still needs precision before 3648 * this insn, so for the K-based conditional 3649 * there is nothing new to be marked. 
3650 */ 3651 } 3652 } else if (class == BPF_LD) { 3653 if (!bt_is_reg_set(bt, dreg)) 3654 return 0; 3655 bt_clear_reg(bt, dreg); 3656 /* It's ld_imm64 or ld_abs or ld_ind. 3657 * For ld_imm64 no further tracking of precision 3658 * into parent is necessary 3659 */ 3660 if (mode == BPF_IND || mode == BPF_ABS) 3661 /* to be analyzed */ 3662 return -ENOTSUPP; 3663 } 3664 return 0; 3665 } 3666 3667 /* the scalar precision tracking algorithm: 3668 * . at the start all registers have precise=false. 3669 * . scalar ranges are tracked as normal through alu and jmp insns. 3670 * . once precise value of the scalar register is used in: 3671 * . ptr + scalar alu 3672 * . if (scalar cond K|scalar) 3673 * . helper_call(.., scalar, ...) where ARG_CONST is expected 3674 * backtrack through the verifier states and mark all registers and 3675 * stack slots with spilled constants that these scalar registers 3676 * should be precise. 3677 * . during state pruning two registers (or spilled stack slots) 3678 * are equivalent if both are not precise. 3679 * 3680 * Note the verifier cannot simply walk register parentage chain, 3681 * since many different registers and stack slots could have been 3682 * used to compute single precise scalar. 3683 * 3684 * The approach of starting with precise=true for all registers and then 3685 * backtracking to mark a register as not precise when the verifier detects 3686 * that program doesn't care about specific value (e.g., when helper 3687 * takes register as ARG_ANYTHING parameter) is not safe. 3688 * 3689 * It's ok to walk single parentage chain of the verifier states. 3690 * It's possible that this backtracking will go all the way till 1st insn. 3691 * All other branches will be explored for needing precision later. 3692 * 3693 * The backtracking needs to deal with cases like: 3694 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 3695 * r9 -= r8 3696 * r5 = r9 3697 * if r5 > 0x79f goto pc+7 3698 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 3699 * r5 += 1 3700 * ... 3701 * call bpf_perf_event_output#25 3702 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 3703 * 3704 * and this case: 3705 * r6 = 1 3706 * call foo // uses callee's r6 inside to compute r0 3707 * r0 += r6 3708 * if r0 == 0 goto 3709 * 3710 * to track above reg_mask/stack_mask needs to be independent for each frame. 3711 * 3712 * Also if parent's curframe > frame where backtracking started, 3713 * the verifier needs to mark registers in both frames, otherwise callees 3714 * may incorrectly prune callers. This is similar to 3715 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 3716 * 3717 * For now backtracking falls back into conservative marking. 3718 */ 3719 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 3720 struct bpf_verifier_state *st) 3721 { 3722 struct bpf_func_state *func; 3723 struct bpf_reg_state *reg; 3724 int i, j; 3725 3726 if (env->log.level & BPF_LOG_LEVEL2) { 3727 verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n", 3728 st->curframe); 3729 } 3730 3731 /* big hammer: mark all scalars precise in this path. 3732 * pop_stack may still get !precise scalars. 3733 * We also skip current state and go straight to first parent state, 3734 * because precision markings in current non-checkpointed state are 3735 * not needed. See why in the comment in __mark_chain_precision below.
3736 */ 3737 for (st = st->parent; st; st = st->parent) { 3738 for (i = 0; i <= st->curframe; i++) { 3739 func = st->frame[i]; 3740 for (j = 0; j < BPF_REG_FP; j++) { 3741 reg = &func->regs[j]; 3742 if (reg->type != SCALAR_VALUE || reg->precise) 3743 continue; 3744 reg->precise = true; 3745 if (env->log.level & BPF_LOG_LEVEL2) { 3746 verbose(env, "force_precise: frame%d: forcing r%d to be precise\n", 3747 i, j); 3748 } 3749 } 3750 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 3751 if (!is_spilled_reg(&func->stack[j])) 3752 continue; 3753 reg = &func->stack[j].spilled_ptr; 3754 if (reg->type != SCALAR_VALUE || reg->precise) 3755 continue; 3756 reg->precise = true; 3757 if (env->log.level & BPF_LOG_LEVEL2) { 3758 verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n", 3759 i, -(j + 1) * 8); 3760 } 3761 } 3762 } 3763 } 3764 } 3765 3766 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 3767 { 3768 struct bpf_func_state *func; 3769 struct bpf_reg_state *reg; 3770 int i, j; 3771 3772 for (i = 0; i <= st->curframe; i++) { 3773 func = st->frame[i]; 3774 for (j = 0; j < BPF_REG_FP; j++) { 3775 reg = &func->regs[j]; 3776 if (reg->type != SCALAR_VALUE) 3777 continue; 3778 reg->precise = false; 3779 } 3780 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 3781 if (!is_spilled_reg(&func->stack[j])) 3782 continue; 3783 reg = &func->stack[j].spilled_ptr; 3784 if (reg->type != SCALAR_VALUE) 3785 continue; 3786 reg->precise = false; 3787 } 3788 } 3789 } 3790 3791 static bool idset_contains(struct bpf_idset *s, u32 id) 3792 { 3793 u32 i; 3794 3795 for (i = 0; i < s->count; ++i) 3796 if (s->ids[i] == id) 3797 return true; 3798 3799 return false; 3800 } 3801 3802 static int idset_push(struct bpf_idset *s, u32 id) 3803 { 3804 if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) 3805 return -EFAULT; 3806 s->ids[s->count++] = id; 3807 return 0; 3808 } 3809 3810 static void idset_reset(struct bpf_idset *s) 3811 { 3812 s->count = 0; 3813 } 3814 3815 /* Collect a set of IDs for all registers currently marked as precise in env->bt. 3816 * Mark all registers with these IDs as precise. 
3817 */ 3818 static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 3819 { 3820 struct bpf_idset *precise_ids = &env->idset_scratch; 3821 struct backtrack_state *bt = &env->bt; 3822 struct bpf_func_state *func; 3823 struct bpf_reg_state *reg; 3824 DECLARE_BITMAP(mask, 64); 3825 int i, fr; 3826 3827 idset_reset(precise_ids); 3828 3829 for (fr = bt->frame; fr >= 0; fr--) { 3830 func = st->frame[fr]; 3831 3832 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); 3833 for_each_set_bit(i, mask, 32) { 3834 reg = &func->regs[i]; 3835 if (!reg->id || reg->type != SCALAR_VALUE) 3836 continue; 3837 if (idset_push(precise_ids, reg->id)) 3838 return -EFAULT; 3839 } 3840 3841 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); 3842 for_each_set_bit(i, mask, 64) { 3843 if (i >= func->allocated_stack / BPF_REG_SIZE) 3844 break; 3845 if (!is_spilled_scalar_reg(&func->stack[i])) 3846 continue; 3847 reg = &func->stack[i].spilled_ptr; 3848 if (!reg->id) 3849 continue; 3850 if (idset_push(precise_ids, reg->id)) 3851 return -EFAULT; 3852 } 3853 } 3854 3855 for (fr = 0; fr <= st->curframe; ++fr) { 3856 func = st->frame[fr]; 3857 3858 for (i = BPF_REG_0; i < BPF_REG_10; ++i) { 3859 reg = &func->regs[i]; 3860 if (!reg->id) 3861 continue; 3862 if (!idset_contains(precise_ids, reg->id)) 3863 continue; 3864 bt_set_frame_reg(bt, fr, i); 3865 } 3866 for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) { 3867 if (!is_spilled_scalar_reg(&func->stack[i])) 3868 continue; 3869 reg = &func->stack[i].spilled_ptr; 3870 if (!reg->id) 3871 continue; 3872 if (!idset_contains(precise_ids, reg->id)) 3873 continue; 3874 bt_set_frame_slot(bt, fr, i); 3875 } 3876 } 3877 3878 return 0; 3879 } 3880 3881 /* 3882 * __mark_chain_precision() backtracks BPF program instruction sequence and 3883 * chain of verifier states making sure that register *regno* (if regno >= 0) 3884 * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked 3885 * SCALARS, as well as any other registers and slots that contribute to 3886 * a tracked state of given registers/stack slots, depending on specific BPF 3887 * assembly instructions (see backtrack_insns() for exact instruction handling 3888 * logic). This backtracking relies on recorded jmp_history and is able to 3889 * traverse entire chain of parent states. This process ends only when all the 3890 * necessary registers/slots and their transitive dependencies are marked as 3891 * precise. 3892 * 3893 * One important and subtle aspect is that precise marks *do not matter* in 3894 * the currently verified state (current state). It is important to understand 3895 * why this is the case. 3896 * 3897 * First, note that current state is the state that is not yet "checkpointed", 3898 * i.e., it is not yet put into env->explored_states, and it has no children 3899 * states as well. It's ephemeral, and can end up either a) being discarded if 3900 * compatible explored state is found at some point or BPF_EXIT instruction is 3901 * reached or b) checkpointed and put into env->explored_states, branching out 3902 * into one or more children states. 3903 * 3904 * In the former case, precise markings in current state are completely 3905 * ignored by state comparison code (see regsafe() for details). Only 3906 * checkpointed ("old") state precise markings are important, and if old 3907 * state's register/slot is precise, regsafe() assumes current state's 3908 * register/slot as precise and checks value ranges exactly and precisely. 
If 3909 * states turn out to be compatible, current state's necessary precise 3910 * markings and any required parent states' precise markings are enforced 3911 * after the fact with propagate_precision() logic. But it's 3912 * important to realize that in this case, even after marking current state 3913 * registers/slots as precise, we immediately discard current state. So what 3914 * actually matters is any of the precise markings propagated into current 3915 * state's parent states, which are always checkpointed (due to b) case above). 3916 * As such, for scenario a) it doesn't matter if current state has precise 3917 * markings set or not. 3918 * 3919 * Now, for the scenario b), checkpointing and forking into child(ren) 3920 * state(s). Note that before current state gets to checkpointing step, any 3921 * processed instruction always assumes precise SCALAR register/slot 3922 * knowledge: if precise value or range is useful to prune jump branch, BPF 3923 * verifier takes this opportunity enthusiastically. Similarly, when 3924 * register's value is used to calculate offset or memory address, exact 3925 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to 3926 * what we mentioned above about state comparison ignoring precise markings, 3927 * BPF verifier ignores and also assumes precise 3928 * markings *at will* during instruction verification process. But as verifier 3929 * assumes precision, it also propagates any precision dependencies across 3930 * parent states, which are not yet finalized, so can be further restricted 3931 * based on new knowledge gained from restrictions enforced by their children 3932 * states. This is so that once those parent states are finalized, i.e., when 3933 * they have no more active children state, state comparison logic in 3934 * is_state_visited() would enforce strict and precise SCALAR ranges, if 3935 * required for correctness. 3936 * 3937 * To build a bit more intuition, note also that once a state is checkpointed, 3938 * the path we took to get to that state is not important. This is a crucial 3939 * property for state pruning. When state is checkpointed and finalized at 3940 * some instruction index, it can be correctly and safely used to "short 3941 * circuit" any *compatible* state that reaches exactly the same instruction 3942 * index. I.e., if we jumped to that instruction from a completely different 3943 * code path than original finalized state was derived from, it doesn't 3944 * matter, current state can be discarded because from that instruction 3945 * forward having a compatible state will ensure we will safely reach the 3946 * exit. States describe preconditions for further exploration, but completely 3947 * forget the history of how we got here. 3948 * 3949 * This also means that even if we needed precise SCALAR range to get to 3950 * finalized state, but from that point forward *that same* SCALAR register is 3951 * never used in a precise context (i.e., its precise value is not needed for 3952 * correctness), it's correct and safe to mark such register as "imprecise" 3953 * (i.e., precise marking set to false). This is what we rely on when we do 3954 * not set precise marking in current state. If no child state requires 3955 * precision for any given SCALAR register, it's safe to dictate that it can 3956 * be imprecise.
If any child state does require this register to be precise, 3957 * we'll mark it precise later retroactively during precise markings 3958 * propagation from child state to parent states. 3959 * 3960 * Skipping precise marking setting in current state is a mild version of 3961 * relying on the above observation. But we can utilize this property even 3962 * more aggressively by proactively forgetting any precise marking in the 3963 * current state (which we inherited from the parent state), right before we 3964 * checkpoint it and branch off into new child state. This is done by 3965 * mark_all_scalars_imprecise() to hopefully get more permissive and generic 3966 * finalized states which help in short circuiting more future states. 3967 */ 3968 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) 3969 { 3970 struct backtrack_state *bt = &env->bt; 3971 struct bpf_verifier_state *st = env->cur_state; 3972 int first_idx = st->first_insn_idx; 3973 int last_idx = env->insn_idx; 3974 int subseq_idx = -1; 3975 struct bpf_func_state *func; 3976 struct bpf_reg_state *reg; 3977 bool skip_first = true; 3978 int i, fr, err; 3979 3980 if (!env->bpf_capable) 3981 return 0; 3982 3983 /* set frame number from which we are starting to backtrack */ 3984 bt_init(bt, env->cur_state->curframe); 3985 3986 /* Do sanity checks against current state of register and/or stack 3987 * slot, but don't set precise flag in current state, as precision 3988 * tracking in the current state is unnecessary. 3989 */ 3990 func = st->frame[bt->frame]; 3991 if (regno >= 0) { 3992 reg = &func->regs[regno]; 3993 if (reg->type != SCALAR_VALUE) { 3994 WARN_ONCE(1, "backtracing misuse"); 3995 return -EFAULT; 3996 } 3997 bt_set_reg(bt, regno); 3998 } 3999 4000 if (bt_empty(bt)) 4001 return 0; 4002 4003 for (;;) { 4004 DECLARE_BITMAP(mask, 64); 4005 u32 history = st->jmp_history_cnt; 4006 4007 if (env->log.level & BPF_LOG_LEVEL2) { 4008 verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", 4009 bt->frame, last_idx, first_idx, subseq_idx); 4010 } 4011 4012 /* If some register with scalar ID is marked as precise, 4013 * make sure that all registers sharing this ID are also precise. 4014 * This is needed to estimate effect of find_equal_scalars(). 4015 * Do this at the last instruction of each state, 4016 * bpf_reg_state::id fields are valid for these instructions. 4017 * 4018 * Allows to track precision in situation like below: 4019 * 4020 * r2 = unknown value 4021 * ... 4022 * --- state #0 --- 4023 * ... 4024 * r1 = r2 // r1 and r2 now share the same ID 4025 * ... 4026 * --- state #1 {r1.id = A, r2.id = A} --- 4027 * ... 4028 * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1 4029 * ... 
4030 * --- state #2 {r1.id = A, r2.id = A} --- 4031 * r3 = r10 4032 * r3 += r1 // need to mark both r1 and r2 4033 */ 4034 if (mark_precise_scalar_ids(env, st)) 4035 return -EFAULT; 4036 4037 if (last_idx < 0) { 4038 /* we are at the entry into subprog, which 4039 * is expected for global funcs, but only if 4040 * requested precise registers are R1-R5 4041 * (which are global func's input arguments) 4042 */ 4043 if (st->curframe == 0 && 4044 st->frame[0]->subprogno > 0 && 4045 st->frame[0]->callsite == BPF_MAIN_FUNC && 4046 bt_stack_mask(bt) == 0 && 4047 (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) { 4048 bitmap_from_u64(mask, bt_reg_mask(bt)); 4049 for_each_set_bit(i, mask, 32) { 4050 reg = &st->frame[0]->regs[i]; 4051 bt_clear_reg(bt, i); 4052 if (reg->type == SCALAR_VALUE) 4053 reg->precise = true; 4054 } 4055 return 0; 4056 } 4057 4058 verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n", 4059 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); 4060 WARN_ONCE(1, "verifier backtracking bug"); 4061 return -EFAULT; 4062 } 4063 4064 for (i = last_idx;;) { 4065 if (skip_first) { 4066 err = 0; 4067 skip_first = false; 4068 } else { 4069 err = backtrack_insn(env, i, subseq_idx, bt); 4070 } 4071 if (err == -ENOTSUPP) { 4072 mark_all_scalars_precise(env, env->cur_state); 4073 bt_reset(bt); 4074 return 0; 4075 } else if (err) { 4076 return err; 4077 } 4078 if (bt_empty(bt)) 4079 /* Found assignment(s) into tracked register in this state. 4080 * Since this state is already marked, just return. 4081 * Nothing to be tracked further in the parent state. 4082 */ 4083 return 0; 4084 if (i == first_idx) 4085 break; 4086 subseq_idx = i; 4087 i = get_prev_insn_idx(st, i, &history); 4088 if (i >= env->prog->len) { 4089 /* This can happen if backtracking reached insn 0 4090 * and there are still reg_mask or stack_mask 4091 * to backtrack. 4092 * It means the backtracking missed the spot where 4093 * particular register was initialized with a constant. 4094 */ 4095 verbose(env, "BUG backtracking idx %d\n", i); 4096 WARN_ONCE(1, "verifier backtracking bug"); 4097 return -EFAULT; 4098 } 4099 } 4100 st = st->parent; 4101 if (!st) 4102 break; 4103 4104 for (fr = bt->frame; fr >= 0; fr--) { 4105 func = st->frame[fr]; 4106 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); 4107 for_each_set_bit(i, mask, 32) { 4108 reg = &func->regs[i]; 4109 if (reg->type != SCALAR_VALUE) { 4110 bt_clear_frame_reg(bt, fr, i); 4111 continue; 4112 } 4113 if (reg->precise) 4114 bt_clear_frame_reg(bt, fr, i); 4115 else 4116 reg->precise = true; 4117 } 4118 4119 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); 4120 for_each_set_bit(i, mask, 64) { 4121 if (i >= func->allocated_stack / BPF_REG_SIZE) { 4122 /* the sequence of instructions: 4123 * 2: (bf) r3 = r10 4124 * 3: (7b) *(u64 *)(r3 -8) = r0 4125 * 4: (79) r4 = *(u64 *)(r10 -8) 4126 * doesn't contain jmps. It's backtracked 4127 * as a single block. 4128 * During backtracking insn 3 is not recognized as 4129 * stack access, so at the end of backtracking 4130 * stack slot fp-8 is still marked in stack_mask. 4131 * However the parent state may not have accessed 4132 * fp-8 and it's "unallocated" stack space. 4133 * In such case fallback to conservative. 
4134 */ 4135 mark_all_scalars_precise(env, env->cur_state); 4136 bt_reset(bt); 4137 return 0; 4138 } 4139 4140 if (!is_spilled_scalar_reg(&func->stack[i])) { 4141 bt_clear_frame_slot(bt, fr, i); 4142 continue; 4143 } 4144 reg = &func->stack[i].spilled_ptr; 4145 if (reg->precise) 4146 bt_clear_frame_slot(bt, fr, i); 4147 else 4148 reg->precise = true; 4149 } 4150 if (env->log.level & BPF_LOG_LEVEL2) { 4151 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, 4152 bt_frame_reg_mask(bt, fr)); 4153 verbose(env, "mark_precise: frame%d: parent state regs=%s ", 4154 fr, env->tmp_str_buf); 4155 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, 4156 bt_frame_stack_mask(bt, fr)); 4157 verbose(env, "stack=%s: ", env->tmp_str_buf); 4158 print_verifier_state(env, func, true); 4159 } 4160 } 4161 4162 if (bt_empty(bt)) 4163 return 0; 4164 4165 subseq_idx = first_idx; 4166 last_idx = st->last_insn_idx; 4167 first_idx = st->first_insn_idx; 4168 } 4169 4170 /* if we still have requested precise regs or slots, we missed 4171 * something (e.g., stack access through non-r10 register), so 4172 * fallback to marking all precise 4173 */ 4174 if (!bt_empty(bt)) { 4175 mark_all_scalars_precise(env, env->cur_state); 4176 bt_reset(bt); 4177 } 4178 4179 return 0; 4180 } 4181 4182 int mark_chain_precision(struct bpf_verifier_env *env, int regno) 4183 { 4184 return __mark_chain_precision(env, regno); 4185 } 4186 4187 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to 4188 * desired reg and stack masks across all relevant frames 4189 */ 4190 static int mark_chain_precision_batch(struct bpf_verifier_env *env) 4191 { 4192 return __mark_chain_precision(env, -1); 4193 } 4194 4195 static bool is_spillable_regtype(enum bpf_reg_type type) 4196 { 4197 switch (base_type(type)) { 4198 case PTR_TO_MAP_VALUE: 4199 case PTR_TO_STACK: 4200 case PTR_TO_CTX: 4201 case PTR_TO_PACKET: 4202 case PTR_TO_PACKET_META: 4203 case PTR_TO_PACKET_END: 4204 case PTR_TO_FLOW_KEYS: 4205 case CONST_PTR_TO_MAP: 4206 case PTR_TO_SOCKET: 4207 case PTR_TO_SOCK_COMMON: 4208 case PTR_TO_TCP_SOCK: 4209 case PTR_TO_XDP_SOCK: 4210 case PTR_TO_BTF_ID: 4211 case PTR_TO_BUF: 4212 case PTR_TO_MEM: 4213 case PTR_TO_FUNC: 4214 case PTR_TO_MAP_KEY: 4215 return true; 4216 default: 4217 return false; 4218 } 4219 } 4220 4221 /* Does this register contain a constant zero? 
*/ 4222 static bool register_is_null(struct bpf_reg_state *reg) 4223 { 4224 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 4225 } 4226 4227 static bool register_is_const(struct bpf_reg_state *reg) 4228 { 4229 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 4230 } 4231 4232 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) 4233 { 4234 return tnum_is_unknown(reg->var_off) && 4235 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && 4236 reg->umin_value == 0 && reg->umax_value == U64_MAX && 4237 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && 4238 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; 4239 } 4240 4241 static bool register_is_bounded(struct bpf_reg_state *reg) 4242 { 4243 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); 4244 } 4245 4246 static bool __is_pointer_value(bool allow_ptr_leaks, 4247 const struct bpf_reg_state *reg) 4248 { 4249 if (allow_ptr_leaks) 4250 return false; 4251 4252 return reg->type != SCALAR_VALUE; 4253 } 4254 4255 /* Copy src state preserving dst->parent and dst->live fields */ 4256 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src) 4257 { 4258 struct bpf_reg_state *parent = dst->parent; 4259 enum bpf_reg_liveness live = dst->live; 4260 4261 *dst = *src; 4262 dst->parent = parent; 4263 dst->live = live; 4264 } 4265 4266 static void save_register_state(struct bpf_func_state *state, 4267 int spi, struct bpf_reg_state *reg, 4268 int size) 4269 { 4270 int i; 4271 4272 copy_register_state(&state->stack[spi].spilled_ptr, reg); 4273 if (size == BPF_REG_SIZE) 4274 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 4275 4276 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) 4277 state->stack[spi].slot_type[i - 1] = STACK_SPILL; 4278 4279 /* size < 8 bytes spill */ 4280 for (; i; i--) 4281 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); 4282 } 4283 4284 static bool is_bpf_st_mem(struct bpf_insn *insn) 4285 { 4286 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; 4287 } 4288 4289 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, 4290 * stack boundary and alignment are checked in check_mem_access() 4291 */ 4292 static int check_stack_write_fixed_off(struct bpf_verifier_env *env, 4293 /* stack frame we're writing to */ 4294 struct bpf_func_state *state, 4295 int off, int size, int value_regno, 4296 int insn_idx) 4297 { 4298 struct bpf_func_state *cur; /* state of the current function */ 4299 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 4300 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 4301 struct bpf_reg_state *reg = NULL; 4302 u32 dst_reg = insn->dst_reg; 4303 4304 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); 4305 if (err) 4306 return err; 4307 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 4308 * so it's aligned access and [off, off + size) are within stack limits 4309 */ 4310 if (!env->allow_ptr_leaks && 4311 state->stack[spi].slot_type[0] == STACK_SPILL && 4312 size != BPF_REG_SIZE) { 4313 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 4314 return -EACCES; 4315 } 4316 4317 cur = env->cur_state->frame[env->cur_state->curframe]; 4318 if (value_regno >= 0) 4319 reg = &cur->regs[value_regno]; 4320 if (!env->bypass_spec_v4) { 4321 bool sanitize = reg && is_spillable_regtype(reg->type); 4322 4323 for (i = 0; i < size; i++) { 4324 u8 type = state->stack[spi].slot_type[i]; 4325 4326 if 
(type != STACK_MISC && type != STACK_ZERO) { 4327 sanitize = true; 4328 break; 4329 } 4330 } 4331 4332 if (sanitize) 4333 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; 4334 } 4335 4336 err = destroy_if_dynptr_stack_slot(env, state, spi); 4337 if (err) 4338 return err; 4339 4340 mark_stack_slot_scratched(env, spi); 4341 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && 4342 !register_is_null(reg) && env->bpf_capable) { 4343 if (dst_reg != BPF_REG_FP) { 4344 /* The backtracking logic can only recognize explicit 4345 * stack slot address like [fp - 8]. Other spill of 4346 * scalar via different register has to be conservative. 4347 * Backtrack from here and mark all registers as precise 4348 * that contributed into 'reg' being a constant. 4349 */ 4350 err = mark_chain_precision(env, value_regno); 4351 if (err) 4352 return err; 4353 } 4354 save_register_state(state, spi, reg, size); 4355 /* Break the relation on a narrowing spill. */ 4356 if (fls64(reg->umax_value) > BITS_PER_BYTE * size) 4357 state->stack[spi].spilled_ptr.id = 0; 4358 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && 4359 insn->imm != 0 && env->bpf_capable) { 4360 struct bpf_reg_state fake_reg = {}; 4361 4362 __mark_reg_known(&fake_reg, (u32)insn->imm); 4363 fake_reg.type = SCALAR_VALUE; 4364 save_register_state(state, spi, &fake_reg, size); 4365 } else if (reg && is_spillable_regtype(reg->type)) { 4366 /* register containing pointer is being spilled into stack */ 4367 if (size != BPF_REG_SIZE) { 4368 verbose_linfo(env, insn_idx, "; "); 4369 verbose(env, "invalid size of register spill\n"); 4370 return -EACCES; 4371 } 4372 if (state != cur && reg->type == PTR_TO_STACK) { 4373 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 4374 return -EINVAL; 4375 } 4376 save_register_state(state, spi, reg, size); 4377 } else { 4378 u8 type = STACK_MISC; 4379 4380 /* regular write of data into stack destroys any spilled ptr */ 4381 state->stack[spi].spilled_ptr.type = NOT_INIT; 4382 /* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */ 4383 if (is_stack_slot_special(&state->stack[spi])) 4384 for (i = 0; i < BPF_REG_SIZE; i++) 4385 scrub_spilled_slot(&state->stack[spi].slot_type[i]); 4386 4387 /* only mark the slot as written if all 8 bytes were written 4388 * otherwise read propagation may incorrectly stop too soon 4389 * when stack slots are partially written. 4390 * This heuristic means that read propagation will be 4391 * conservative, since it will add reg_live_read marks 4392 * to stack slots all the way to first state when programs 4393 * writes+reads less than 8 bytes 4394 */ 4395 if (size == BPF_REG_SIZE) 4396 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 4397 4398 /* when we zero initialize stack slots mark them as such */ 4399 if ((reg && register_is_null(reg)) || 4400 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { 4401 /* backtracking doesn't work for STACK_ZERO yet. */ 4402 err = mark_chain_precision(env, value_regno); 4403 if (err) 4404 return err; 4405 type = STACK_ZERO; 4406 } 4407 4408 /* Mark slots affected by this stack write. */ 4409 for (i = 0; i < size; i++) 4410 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 4411 type; 4412 } 4413 return 0; 4414 } 4415 4416 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is 4417 * known to contain a variable offset. 
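 * (For illustration, a hypothetical case: a pointer computed as fp - 16 + r2,
 * where r2 is only known to lie in [0, 8) at verification time, so the write
 * may land anywhere inside that window.)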
4418 * This function checks whether the write is permitted and conservatively 4419 * tracks the effects of the write, considering that each stack slot in the 4420 * dynamic range is potentially written to. 4421 * 4422 * 'off' includes 'regno->off'. 4423 * 'value_regno' can be -1, meaning that an unknown value is being written to 4424 * the stack. 4425 * 4426 * Spilled pointers in range are not marked as written because we don't know 4427 * what's going to be actually written. This means that read propagation for 4428 * future reads cannot be terminated by this write. 4429 * 4430 * For privileged programs, uninitialized stack slots are considered 4431 * initialized by this write (even though we don't know exactly what offsets 4432 * are going to be written to). The idea is that we don't want the verifier to 4433 * reject future reads that access slots written to through variable offsets. 4434 */ 4435 static int check_stack_write_var_off(struct bpf_verifier_env *env, 4436 /* func where register points to */ 4437 struct bpf_func_state *state, 4438 int ptr_regno, int off, int size, 4439 int value_regno, int insn_idx) 4440 { 4441 struct bpf_func_state *cur; /* state of the current function */ 4442 int min_off, max_off; 4443 int i, err; 4444 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL; 4445 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 4446 bool writing_zero = false; 4447 /* set if the fact that we're writing a zero is used to let any 4448 * stack slots remain STACK_ZERO 4449 */ 4450 bool zero_used = false; 4451 4452 cur = env->cur_state->frame[env->cur_state->curframe]; 4453 ptr_reg = &cur->regs[ptr_regno]; 4454 min_off = ptr_reg->smin_value + off; 4455 max_off = ptr_reg->smax_value + off + size; 4456 if (value_regno >= 0) 4457 value_reg = &cur->regs[value_regno]; 4458 if ((value_reg && register_is_null(value_reg)) || 4459 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) 4460 writing_zero = true; 4461 4462 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); 4463 if (err) 4464 return err; 4465 4466 for (i = min_off; i < max_off; i++) { 4467 int spi; 4468 4469 spi = __get_spi(i); 4470 err = destroy_if_dynptr_stack_slot(env, state, spi); 4471 if (err) 4472 return err; 4473 } 4474 4475 /* Variable offset writes destroy any spilled pointers in range. */ 4476 for (i = min_off; i < max_off; i++) { 4477 u8 new_type, *stype; 4478 int slot, spi; 4479 4480 slot = -i - 1; 4481 spi = slot / BPF_REG_SIZE; 4482 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 4483 mark_stack_slot_scratched(env, spi); 4484 4485 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { 4486 /* Reject the write if range we may write to has not 4487 * been initialized beforehand. If we didn't reject 4488 * here, the ptr status would be erased below (even 4489 * though not all slots are actually overwritten), 4490 * possibly opening the door to leaks. 4491 * 4492 * We do however catch STACK_INVALID case below, and 4493 * only allow reading possibly uninitialized memory 4494 * later for CAP_PERFMON, as the write may not happen to 4495 * that slot. 4496 */ 4497 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", 4498 insn_idx, i); 4499 return -EINVAL; 4500 } 4501 4502 /* Erase all spilled pointers. */ 4503 state->stack[spi].spilled_ptr.type = NOT_INIT; 4504 4505 /* Update the slot type. 
*/ 4506 new_type = STACK_MISC; 4507 if (writing_zero && *stype == STACK_ZERO) { 4508 new_type = STACK_ZERO; 4509 zero_used = true; 4510 } 4511 /* If the slot is STACK_INVALID, we check whether it's OK to 4512 * pretend that it will be initialized by this write. The slot 4513 * might not actually be written to, and so if we mark it as 4514 * initialized future reads might leak uninitialized memory. 4515 * For privileged programs, we will accept such reads to slots 4516 * that may or may not be written because, if we rejected 4517 * them, the error would be too confusing. 4518 */ 4519 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { 4520 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", 4521 insn_idx, i); 4522 return -EINVAL; 4523 } 4524 *stype = new_type; 4525 } 4526 if (zero_used) { 4527 /* backtracking doesn't work for STACK_ZERO yet. */ 4528 err = mark_chain_precision(env, value_regno); 4529 if (err) 4530 return err; 4531 } 4532 return 0; 4533 } 4534 4535 /* When register 'dst_regno' is assigned some values from stack[min_off, 4536 * max_off), we set the register's type according to the types of the 4537 * respective stack slots. If all the stack values are known to be zeros, then 4538 * so is the destination reg. Otherwise, the register is considered to be 4539 * SCALAR. This function does not deal with register filling; the caller must 4540 * ensure that all spilled registers in the stack range have been marked as 4541 * read. 4542 */ 4543 static void mark_reg_stack_read(struct bpf_verifier_env *env, 4544 /* func where src register points to */ 4545 struct bpf_func_state *ptr_state, 4546 int min_off, int max_off, int dst_regno) 4547 { 4548 struct bpf_verifier_state *vstate = env->cur_state; 4549 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4550 int i, slot, spi; 4551 u8 *stype; 4552 int zeros = 0; 4553 4554 for (i = min_off; i < max_off; i++) { 4555 slot = -i - 1; 4556 spi = slot / BPF_REG_SIZE; 4557 mark_stack_slot_scratched(env, spi); 4558 stype = ptr_state->stack[spi].slot_type; 4559 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) 4560 break; 4561 zeros++; 4562 } 4563 if (zeros == max_off - min_off) { 4564 /* any access_size read into register is zero extended, 4565 * so the whole register == const_zero 4566 */ 4567 __mark_reg_const_zero(&state->regs[dst_regno]); 4568 /* backtracking doesn't support STACK_ZERO yet, 4569 * so mark it precise here, so that later 4570 * backtracking can stop here. 4571 * Backtracking may not need this if this register 4572 * doesn't participate in pointer adjustment. 4573 * Forward propagation of precise flag is not 4574 * necessary either. This mark is only to stop 4575 * backtracking. Any register that contributed 4576 * to const 0 was marked precise before spill. 4577 */ 4578 state->regs[dst_regno].precise = true; 4579 } else { 4580 /* have read misc data from the stack */ 4581 mark_reg_unknown(env, state->regs, dst_regno); 4582 } 4583 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 4584 } 4585 4586 /* Read the stack at 'off' and put the results into the register indicated by 4587 * 'dst_regno'. It handles reg filling if the addressed stack slot is a 4588 * spilled reg. 4589 * 4590 * 'dst_regno' can be -1, meaning that the read value is not going to a 4591 * register. 4592 * 4593 * The access is assumed to be within the current stack bounds.
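 *
 * For illustration, a hypothetical full-width fill of a spilled register:
 *   r2 = *(u64 *)(r10 - 8)   // off=-8, size=8, dst_regno=2
 * restores the complete spilled register state into r2. A read that does not
 * match the spill (e.g. a narrow read of an 8-byte spilled value) is either
 * rejected or turns the destination into an unknown scalar, as handled below.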
4594 */ 4595 static int check_stack_read_fixed_off(struct bpf_verifier_env *env, 4596 /* func where src register points to */ 4597 struct bpf_func_state *reg_state, 4598 int off, int size, int dst_regno) 4599 { 4600 struct bpf_verifier_state *vstate = env->cur_state; 4601 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4602 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 4603 struct bpf_reg_state *reg; 4604 u8 *stype, type; 4605 4606 stype = reg_state->stack[spi].slot_type; 4607 reg = &reg_state->stack[spi].spilled_ptr; 4608 4609 mark_stack_slot_scratched(env, spi); 4610 4611 if (is_spilled_reg(&reg_state->stack[spi])) { 4612 u8 spill_size = 1; 4613 4614 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) 4615 spill_size++; 4616 4617 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { 4618 if (reg->type != SCALAR_VALUE) { 4619 verbose_linfo(env, env->insn_idx, "; "); 4620 verbose(env, "invalid size of register fill\n"); 4621 return -EACCES; 4622 } 4623 4624 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 4625 if (dst_regno < 0) 4626 return 0; 4627 4628 if (!(off % BPF_REG_SIZE) && size == spill_size) { 4629 /* The earlier check_reg_arg() has decided the 4630 * subreg_def for this insn. Save it first. 4631 */ 4632 s32 subreg_def = state->regs[dst_regno].subreg_def; 4633 4634 copy_register_state(&state->regs[dst_regno], reg); 4635 state->regs[dst_regno].subreg_def = subreg_def; 4636 } else { 4637 for (i = 0; i < size; i++) { 4638 type = stype[(slot - i) % BPF_REG_SIZE]; 4639 if (type == STACK_SPILL) 4640 continue; 4641 if (type == STACK_MISC) 4642 continue; 4643 if (type == STACK_INVALID && env->allow_uninit_stack) 4644 continue; 4645 verbose(env, "invalid read from stack off %d+%d size %d\n", 4646 off, i, size); 4647 return -EACCES; 4648 } 4649 mark_reg_unknown(env, state->regs, dst_regno); 4650 } 4651 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 4652 return 0; 4653 } 4654 4655 if (dst_regno >= 0) { 4656 /* restore register state from stack */ 4657 copy_register_state(&state->regs[dst_regno], reg); 4658 /* mark reg as written since spilled pointer state likely 4659 * has its liveness marks cleared by is_state_visited() 4660 * which resets stack/reg liveness for state transitions 4661 */ 4662 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 4663 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { 4664 /* If dst_regno==-1, the caller is asking us whether 4665 * it is acceptable to use this value as a SCALAR_VALUE 4666 * (e.g. for XADD). 4667 * We must not allow unprivileged callers to do that 4668 * with spilled pointers.
4669 */ 4670 verbose(env, "leaking pointer from stack off %d\n", 4671 off); 4672 return -EACCES; 4673 } 4674 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 4675 } else { 4676 for (i = 0; i < size; i++) { 4677 type = stype[(slot - i) % BPF_REG_SIZE]; 4678 if (type == STACK_MISC) 4679 continue; 4680 if (type == STACK_ZERO) 4681 continue; 4682 if (type == STACK_INVALID && env->allow_uninit_stack) 4683 continue; 4684 verbose(env, "invalid read from stack off %d+%d size %d\n", 4685 off, i, size); 4686 return -EACCES; 4687 } 4688 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 4689 if (dst_regno >= 0) 4690 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); 4691 } 4692 return 0; 4693 } 4694 4695 enum bpf_access_src { 4696 ACCESS_DIRECT = 1, /* the access is performed by an instruction */ 4697 ACCESS_HELPER = 2, /* the access is performed by a helper */ 4698 }; 4699 4700 static int check_stack_range_initialized(struct bpf_verifier_env *env, 4701 int regno, int off, int access_size, 4702 bool zero_size_allowed, 4703 enum bpf_access_src type, 4704 struct bpf_call_arg_meta *meta); 4705 4706 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 4707 { 4708 return cur_regs(env) + regno; 4709 } 4710 4711 /* Read the stack at 'ptr_regno + off' and put the result into the register 4712 * 'dst_regno'. 4713 * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'), 4714 * but not its variable offset. 4715 * 'size' is assumed to be <= reg size and the access is assumed to be aligned. 4716 * 4717 * As opposed to check_stack_read_fixed_off, this function doesn't deal with 4718 * filling registers (i.e. reads of spilled register cannot be detected when 4719 * the offset is not fixed). We conservatively mark 'dst_regno' as containing 4720 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable 4721 * offset; for a fixed offset check_stack_read_fixed_off should be used 4722 * instead. 4723 */ 4724 static int check_stack_read_var_off(struct bpf_verifier_env *env, 4725 int ptr_regno, int off, int size, int dst_regno) 4726 { 4727 /* The state of the source register. */ 4728 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 4729 struct bpf_func_state *ptr_state = func(env, reg); 4730 int err; 4731 int min_off, max_off; 4732 4733 /* Note that we pass a NULL meta, so raw access will not be permitted. 4734 */ 4735 err = check_stack_range_initialized(env, ptr_regno, off, size, 4736 false, ACCESS_DIRECT, NULL); 4737 if (err) 4738 return err; 4739 4740 min_off = reg->smin_value + off; 4741 max_off = reg->smax_value + off; 4742 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); 4743 return 0; 4744 } 4745 4746 /* check_stack_read dispatches to check_stack_read_fixed_off or 4747 * check_stack_read_var_off. 4748 * 4749 * The caller must ensure that the offset falls within the allocated stack 4750 * bounds. 4751 * 4752 * 'dst_regno' is a register which will receive the value from the stack. It 4753 * can be -1, meaning that the read value is not going to a register. 4754 */ 4755 static int check_stack_read(struct bpf_verifier_env *env, 4756 int ptr_regno, int off, int size, 4757 int dst_regno) 4758 { 4759 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 4760 struct bpf_func_state *state = func(env, reg); 4761 int err; 4762 /* Some accesses are only permitted with a static offset. 
*/ 4763 bool var_off = !tnum_is_const(reg->var_off); 4764 4765 /* The offset is required to be static when reads don't go to a 4766 * register, in order to not leak pointers (see 4767 * check_stack_read_fixed_off). 4768 */ 4769 if (dst_regno < 0 && var_off) { 4770 char tn_buf[48]; 4771 4772 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4773 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n", 4774 tn_buf, off, size); 4775 return -EACCES; 4776 } 4777 /* Variable offset is prohibited for unprivileged mode for simplicity 4778 * since it requires corresponding support in Spectre masking for stack 4779 * ALU. See also retrieve_ptr_limit(). The check in 4780 * check_stack_access_for_ptr_arithmetic() called by 4781 * adjust_ptr_min_max_vals() prevents users from creating stack pointers 4782 * with variable offsets, therefore no check is required here. Further, 4783 * just checking it here would be insufficient as speculative stack 4784 * writes could still lead to unsafe speculative behaviour. 4785 */ 4786 if (!var_off) { 4787 off += reg->var_off.value; 4788 err = check_stack_read_fixed_off(env, state, off, size, 4789 dst_regno); 4790 } else { 4791 /* Variable offset stack reads need more conservative handling 4792 * than fixed offset ones. Note that dst_regno >= 0 on this 4793 * branch. 4794 */ 4795 err = check_stack_read_var_off(env, ptr_regno, off, size, 4796 dst_regno); 4797 } 4798 return err; 4799 } 4800 4801 4802 /* check_stack_write dispatches to check_stack_write_fixed_off or 4803 * check_stack_write_var_off. 4804 * 4805 * 'ptr_regno' is the register used as a pointer into the stack. 4806 * 'off' includes 'ptr_regno->off', but not its variable offset (if any). 4807 * 'value_regno' is the register whose value we're writing to the stack. It can 4808 * be -1, meaning that we're not writing from a register. 4809 * 4810 * The caller must ensure that the offset falls within the maximum stack size. 4811 */ 4812 static int check_stack_write(struct bpf_verifier_env *env, 4813 int ptr_regno, int off, int size, 4814 int value_regno, int insn_idx) 4815 { 4816 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 4817 struct bpf_func_state *state = func(env, reg); 4818 int err; 4819 4820 if (tnum_is_const(reg->var_off)) { 4821 off += reg->var_off.value; 4822 err = check_stack_write_fixed_off(env, state, off, size, 4823 value_regno, insn_idx); 4824 } else { 4825 /* Variable offset stack writes need more conservative handling 4826 * than fixed offset ones.
4827 */ 4828 err = check_stack_write_var_off(env, state, 4829 ptr_regno, off, size, 4830 value_regno, insn_idx); 4831 } 4832 return err; 4833 } 4834 4835 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 4836 int off, int size, enum bpf_access_type type) 4837 { 4838 struct bpf_reg_state *regs = cur_regs(env); 4839 struct bpf_map *map = regs[regno].map_ptr; 4840 u32 cap = bpf_map_flags_to_cap(map); 4841 4842 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 4843 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 4844 map->value_size, off, size); 4845 return -EACCES; 4846 } 4847 4848 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 4849 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 4850 map->value_size, off, size); 4851 return -EACCES; 4852 } 4853 4854 return 0; 4855 } 4856 4857 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 4858 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 4859 int off, int size, u32 mem_size, 4860 bool zero_size_allowed) 4861 { 4862 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 4863 struct bpf_reg_state *reg; 4864 4865 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 4866 return 0; 4867 4868 reg = &cur_regs(env)[regno]; 4869 switch (reg->type) { 4870 case PTR_TO_MAP_KEY: 4871 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", 4872 mem_size, off, size); 4873 break; 4874 case PTR_TO_MAP_VALUE: 4875 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 4876 mem_size, off, size); 4877 break; 4878 case PTR_TO_PACKET: 4879 case PTR_TO_PACKET_META: 4880 case PTR_TO_PACKET_END: 4881 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 4882 off, size, regno, reg->id, off, mem_size); 4883 break; 4884 case PTR_TO_MEM: 4885 default: 4886 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 4887 mem_size, off, size); 4888 } 4889 4890 return -EACCES; 4891 } 4892 4893 /* check read/write into a memory region with possible variable offset */ 4894 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 4895 int off, int size, u32 mem_size, 4896 bool zero_size_allowed) 4897 { 4898 struct bpf_verifier_state *vstate = env->cur_state; 4899 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4900 struct bpf_reg_state *reg = &state->regs[regno]; 4901 int err; 4902 4903 /* We may have adjusted the register pointing to memory region, so we 4904 * need to try adding each of min_value and max_value to off 4905 * to make sure our theoretical access will be safe. 4906 * 4907 * The minimum value is only important with signed 4908 * comparisons where we can't assume the floor of a 4909 * value is 0. If we are using signed variables for our 4910 * index'es we need to make sure that whatever we use 4911 * will have a set floor within our range. 
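 *
 * Worked example with hypothetical numbers: a 4-byte access at off=8 into a
 * 64-byte region through a register whose range is [0, 40] checks both
 * 8 + 0 + 4 and 8 + 40 + 4 against the region size and is allowed; if the
 * register may be negative (e.g. smin_value = -16 with off=8), the access is
 * rejected below as "min value is negative".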
4912 */ 4913 if (reg->smin_value < 0 && 4914 (reg->smin_value == S64_MIN || 4915 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 4916 reg->smin_value + off < 0)) { 4917 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 4918 regno); 4919 return -EACCES; 4920 } 4921 err = __check_mem_access(env, regno, reg->smin_value + off, size, 4922 mem_size, zero_size_allowed); 4923 if (err) { 4924 verbose(env, "R%d min value is outside of the allowed memory range\n", 4925 regno); 4926 return err; 4927 } 4928 4929 /* If we haven't set a max value then we need to bail since we can't be 4930 * sure we won't do bad things. 4931 * If reg->umax_value + off could overflow, treat that as unbounded too. 4932 */ 4933 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 4934 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 4935 regno); 4936 return -EACCES; 4937 } 4938 err = __check_mem_access(env, regno, reg->umax_value + off, size, 4939 mem_size, zero_size_allowed); 4940 if (err) { 4941 verbose(env, "R%d max value is outside of the allowed memory range\n", 4942 regno); 4943 return err; 4944 } 4945 4946 return 0; 4947 } 4948 4949 static int __check_ptr_off_reg(struct bpf_verifier_env *env, 4950 const struct bpf_reg_state *reg, int regno, 4951 bool fixed_off_ok) 4952 { 4953 /* Access to this pointer-typed register or passing it to a helper 4954 * is only allowed in its original, unmodified form. 4955 */ 4956 4957 if (reg->off < 0) { 4958 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n", 4959 reg_type_str(env, reg->type), regno, reg->off); 4960 return -EACCES; 4961 } 4962 4963 if (!fixed_off_ok && reg->off) { 4964 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", 4965 reg_type_str(env, reg->type), regno, reg->off); 4966 return -EACCES; 4967 } 4968 4969 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 4970 char tn_buf[48]; 4971 4972 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4973 verbose(env, "variable %s access var_off=%s disallowed\n", 4974 reg_type_str(env, reg->type), tn_buf); 4975 return -EACCES; 4976 } 4977 4978 return 0; 4979 } 4980 4981 int check_ptr_off_reg(struct bpf_verifier_env *env, 4982 const struct bpf_reg_state *reg, int regno) 4983 { 4984 return __check_ptr_off_reg(env, reg, regno, false); 4985 } 4986 4987 static int map_kptr_match_type(struct bpf_verifier_env *env, 4988 struct btf_field *kptr_field, 4989 struct bpf_reg_state *reg, u32 regno) 4990 { 4991 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); 4992 int perm_flags; 4993 const char *reg_name = ""; 4994 4995 if (btf_is_kernel(reg->btf)) { 4996 perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; 4997 4998 /* Only unreferenced case accepts untrusted pointers */ 4999 if (kptr_field->type == BPF_KPTR_UNREF) 5000 perm_flags |= PTR_UNTRUSTED; 5001 } else { 5002 perm_flags = PTR_MAYBE_NULL | MEM_ALLOC; 5003 } 5004 5005 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) 5006 goto bad_type; 5007 5008 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ 5009 reg_name = btf_type_name(reg->btf, reg->btf_id); 5010 5011 /* For ref_ptr case, release function check should ensure we get one 5012 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the 5013 * normal store of unreferenced kptr, we must ensure var_off is zero. 
5014 * Since ref_ptr cannot be accessed directly by BPF insns, checks for 5015 * reg->off and reg->ref_obj_id are not needed here. 5016 */ 5017 if (__check_ptr_off_reg(env, reg, regno, true)) 5018 return -EACCES; 5019 5020 /* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and 5021 * we also need to take into account the reg->off. 5022 * 5023 * We want to support cases like: 5024 * 5025 * struct foo { 5026 * struct bar br; 5027 * struct baz bz; 5028 * }; 5029 * 5030 * struct foo *v; 5031 * v = func(); // PTR_TO_BTF_ID 5032 * val->foo = v; // reg->off is zero, btf and btf_id match type 5033 * val->bar = &v->br; // reg->off is still zero, but we need to retry with 5034 * // first member type of struct after comparison fails 5035 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked 5036 * // to match type 5037 * 5038 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off 5039 * is zero. We must also ensure that btf_struct_ids_match does not walk 5040 * the struct to match type against first member of struct, i.e. reject 5041 * second case from above. Hence, when type is BPF_KPTR_REF, we set 5042 * strict mode to true for type match. 5043 */ 5044 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 5045 kptr_field->kptr.btf, kptr_field->kptr.btf_id, 5046 kptr_field->type == BPF_KPTR_REF)) 5047 goto bad_type; 5048 return 0; 5049 bad_type: 5050 verbose(env, "invalid kptr access, R%d type=%s%s ", regno, 5051 reg_type_str(env, reg->type), reg_name); 5052 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name); 5053 if (kptr_field->type == BPF_KPTR_UNREF) 5054 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED), 5055 targ_name); 5056 else 5057 verbose(env, "\n"); 5058 return -EINVAL; 5059 } 5060 5061 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() 5062 * can dereference RCU protected pointers and result is PTR_TRUSTED. 
5063 */ 5064 static bool in_rcu_cs(struct bpf_verifier_env *env) 5065 { 5066 return env->cur_state->active_rcu_lock || 5067 env->cur_state->active_lock.ptr || 5068 !env->prog->aux->sleepable; 5069 } 5070 5071 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ 5072 BTF_SET_START(rcu_protected_types) 5073 BTF_ID(struct, prog_test_ref_kfunc) 5074 BTF_ID(struct, cgroup) 5075 BTF_ID(struct, bpf_cpumask) 5076 BTF_ID(struct, task_struct) 5077 BTF_SET_END(rcu_protected_types) 5078 5079 static bool rcu_protected_object(const struct btf *btf, u32 btf_id) 5080 { 5081 if (!btf_is_kernel(btf)) 5082 return false; 5083 return btf_id_set_contains(&rcu_protected_types, btf_id); 5084 } 5085 5086 static bool rcu_safe_kptr(const struct btf_field *field) 5087 { 5088 const struct btf_field_kptr *kptr = &field->kptr; 5089 5090 return field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id); 5091 } 5092 5093 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, 5094 int value_regno, int insn_idx, 5095 struct btf_field *kptr_field) 5096 { 5097 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 5098 int class = BPF_CLASS(insn->code); 5099 struct bpf_reg_state *val_reg; 5100 5101 /* Things we already checked for in check_map_access and caller: 5102 * - Reject cases where variable offset may touch kptr 5103 * - size of access (must be BPF_DW) 5104 * - tnum_is_const(reg->var_off) 5105 * - kptr_field->offset == off + reg->var_off.value 5106 */ 5107 /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */ 5108 if (BPF_MODE(insn->code) != BPF_MEM) { 5109 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n"); 5110 return -EACCES; 5111 } 5112 5113 /* We only allow loading referenced kptr, since it will be marked as 5114 * untrusted, similar to unreferenced kptr. 5115 */ 5116 if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) { 5117 verbose(env, "store to referenced kptr disallowed\n"); 5118 return -EACCES; 5119 } 5120 5121 if (class == BPF_LDX) { 5122 val_reg = reg_state(env, value_regno); 5123 /* We can simply mark the value_regno receiving the pointer 5124 * value from map as PTR_TO_BTF_ID, with the correct type. 5125 */ 5126 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, 5127 kptr_field->kptr.btf_id, 5128 rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ? 
5129 PTR_MAYBE_NULL | MEM_RCU : 5130 PTR_MAYBE_NULL | PTR_UNTRUSTED); 5131 /* For mark_ptr_or_null_reg */ 5132 val_reg->id = ++env->id_gen; 5133 } else if (class == BPF_STX) { 5134 val_reg = reg_state(env, value_regno); 5135 if (!register_is_null(val_reg) && 5136 map_kptr_match_type(env, kptr_field, val_reg, value_regno)) 5137 return -EACCES; 5138 } else if (class == BPF_ST) { 5139 if (insn->imm) { 5140 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n", 5141 kptr_field->offset); 5142 return -EACCES; 5143 } 5144 } else { 5145 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n"); 5146 return -EACCES; 5147 } 5148 return 0; 5149 } 5150 5151 /* check read/write into a map element with possible variable offset */ 5152 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 5153 int off, int size, bool zero_size_allowed, 5154 enum bpf_access_src src) 5155 { 5156 struct bpf_verifier_state *vstate = env->cur_state; 5157 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5158 struct bpf_reg_state *reg = &state->regs[regno]; 5159 struct bpf_map *map = reg->map_ptr; 5160 struct btf_record *rec; 5161 int err, i; 5162 5163 err = check_mem_region_access(env, regno, off, size, map->value_size, 5164 zero_size_allowed); 5165 if (err) 5166 return err; 5167 5168 if (IS_ERR_OR_NULL(map->record)) 5169 return 0; 5170 rec = map->record; 5171 for (i = 0; i < rec->cnt; i++) { 5172 struct btf_field *field = &rec->fields[i]; 5173 u32 p = field->offset; 5174 5175 /* If any part of a field can be touched by load/store, reject 5176 * this program. To check that [x1, x2) overlaps with [y1, y2), 5177 * it is sufficient to check x1 < y2 && y1 < x2. 5178 */ 5179 if (reg->smin_value + off < p + btf_field_type_size(field->type) && 5180 p < reg->umax_value + off + size) { 5181 switch (field->type) { 5182 case BPF_KPTR_UNREF: 5183 case BPF_KPTR_REF: 5184 if (src != ACCESS_DIRECT) { 5185 verbose(env, "kptr cannot be accessed indirectly by helper\n"); 5186 return -EACCES; 5187 } 5188 if (!tnum_is_const(reg->var_off)) { 5189 verbose(env, "kptr access cannot have variable offset\n"); 5190 return -EACCES; 5191 } 5192 if (p != off + reg->var_off.value) { 5193 verbose(env, "kptr access misaligned expected=%u off=%llu\n", 5194 p, off + reg->var_off.value); 5195 return -EACCES; 5196 } 5197 if (size != bpf_size_to_bytes(BPF_DW)) { 5198 verbose(env, "kptr access size must be BPF_DW\n"); 5199 return -EACCES; 5200 } 5201 break; 5202 default: 5203 verbose(env, "%s cannot be accessed directly by load/store\n", 5204 btf_field_type_name(field->type)); 5205 return -EACCES; 5206 } 5207 } 5208 } 5209 return 0; 5210 } 5211 5212 #define MAX_PACKET_OFF 0xffff 5213 5214 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 5215 const struct bpf_call_arg_meta *meta, 5216 enum bpf_access_type t) 5217 { 5218 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 5219 5220 switch (prog_type) { 5221 /* Program types only with direct read access go here! */ 5222 case BPF_PROG_TYPE_LWT_IN: 5223 case BPF_PROG_TYPE_LWT_OUT: 5224 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 5225 case BPF_PROG_TYPE_SK_REUSEPORT: 5226 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5227 case BPF_PROG_TYPE_CGROUP_SKB: 5228 if (t == BPF_WRITE) 5229 return false; 5230 fallthrough; 5231 5232 /* Program types with direct read + write access go here! 
 */
5233 case BPF_PROG_TYPE_SCHED_CLS:
5234 case BPF_PROG_TYPE_SCHED_ACT:
5235 case BPF_PROG_TYPE_XDP:
5236 case BPF_PROG_TYPE_LWT_XMIT:
5237 case BPF_PROG_TYPE_SK_SKB:
5238 case BPF_PROG_TYPE_SK_MSG:
5239 if (meta)
5240 return meta->pkt_access;
5241
5242 env->seen_direct_write = true;
5243 return true;
5244
5245 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5246 if (t == BPF_WRITE)
5247 env->seen_direct_write = true;
5248
5249 return true;
5250
5251 default:
5252 return false;
5253 }
5254 }
5255
5256 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
5257 int size, bool zero_size_allowed)
5258 {
5259 struct bpf_reg_state *regs = cur_regs(env);
5260 struct bpf_reg_state *reg = &regs[regno];
5261 int err;
5262
5263 /* We may have added a variable offset to the packet pointer; but any
5264 * reg->range we have comes after that. We are only checking the fixed
5265 * offset.
5266 */
5267
5268 /* We don't allow negative numbers, because we aren't tracking enough
5269 * detail to prove they're safe.
5270 */
5271 if (reg->smin_value < 0) {
5272 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5273 regno);
5274 return -EACCES;
5275 }
5276
5277 err = reg->range < 0 ? -EINVAL :
5278 __check_mem_access(env, regno, off, size, reg->range,
5279 zero_size_allowed);
5280 if (err) {
5281 verbose(env, "R%d offset is outside of the packet\n", regno);
5282 return err;
5283 }
5284
5285 /* __check_mem_access has made sure "off + size - 1" is within u16.
5286 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
5287 * otherwise find_good_pkt_pointers would have refused to set range info,
5288 * in which case __check_mem_access would have rejected this pkt access.
5289 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
5290 */
5291 env->prog->aux->max_pkt_offset =
5292 max_t(u32, env->prog->aux->max_pkt_offset,
5293 off + reg->umax_value + size - 1);
5294
5295 return err;
5296 }
5297
5298 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
5299 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
5300 enum bpf_access_type t, enum bpf_reg_type *reg_type,
5301 struct btf **btf, u32 *btf_id)
5302 {
5303 struct bpf_insn_access_aux info = {
5304 .reg_type = *reg_type,
5305 .log = &env->log,
5306 };
5307
5308 if (env->ops->is_valid_access &&
5309 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
5310 /* A non zero info.ctx_field_size indicates that this field is a
5311 * candidate for later verifier transformation to load the whole
5312 * field and then apply a mask when accessed with a narrower
5313 * access than actual ctx access size. A zero info.ctx_field_size
5314 * will only allow for whole field access and rejects any other
5315 * type of narrower access.
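 * For example (illustrative): if is_valid_access() reports ctx_field_size == 4
 * for a field such as __sk_buff->mark, a 1- or 2-byte load of that field is
 * accepted here and later rewritten to load the whole 4-byte field and
 * shift/mask out the requested bytes, whereas with ctx_field_size == 0 the
 * same narrow load would be rejected.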
5316 */
5317 *reg_type = info.reg_type;
5318
5319 if (base_type(*reg_type) == PTR_TO_BTF_ID) {
5320 *btf = info.btf;
5321 *btf_id = info.btf_id;
5322 } else {
5323 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
5324 }
5325 /* remember the offset of last byte accessed in ctx */
5326 if (env->prog->aux->max_ctx_offset < off + size)
5327 env->prog->aux->max_ctx_offset = off + size;
5328 return 0;
5329 }
5330
5331 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
5332 return -EACCES;
5333 }
5334
5335 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
5336 int size)
5337 {
5338 if (size < 0 || off < 0 ||
5339 (u64)off + size > sizeof(struct bpf_flow_keys)) {
5340 verbose(env, "invalid access to flow keys off=%d size=%d\n",
5341 off, size);
5342 return -EACCES;
5343 }
5344 return 0;
5345 }
5346
5347 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
5348 u32 regno, int off, int size,
5349 enum bpf_access_type t)
5350 {
5351 struct bpf_reg_state *regs = cur_regs(env);
5352 struct bpf_reg_state *reg = &regs[regno];
5353 struct bpf_insn_access_aux info = {};
5354 bool valid;
5355
5356 if (reg->smin_value < 0) {
5357 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5358 regno);
5359 return -EACCES;
5360 }
5361
5362 switch (reg->type) {
5363 case PTR_TO_SOCK_COMMON:
5364 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
5365 break;
5366 case PTR_TO_SOCKET:
5367 valid = bpf_sock_is_valid_access(off, size, t, &info);
5368 break;
5369 case PTR_TO_TCP_SOCK:
5370 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
5371 break;
5372 case PTR_TO_XDP_SOCK:
5373 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
5374 break;
5375 default:
5376 valid = false;
5377 }
5378
5379
5380 if (valid) {
5381 env->insn_aux_data[insn_idx].ctx_field_size =
5382 info.ctx_field_size;
5383 return 0;
5384 }
5385
5386 verbose(env, "R%d invalid %s access off=%d size=%d\n",
5387 regno, reg_type_str(env, reg->type), off, size);
5388
5389 return -EACCES;
5390 }
5391
5392 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
5393 {
5394 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
5395 }
5396
5397 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
5398 {
5399 const struct bpf_reg_state *reg = reg_state(env, regno);
5400
5401 return reg->type == PTR_TO_CTX;
5402 }
5403
5404 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
5405 {
5406 const struct bpf_reg_state *reg = reg_state(env, regno);
5407
5408 return type_is_sk_pointer(reg->type);
5409 }
5410
5411 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
5412 {
5413 const struct bpf_reg_state *reg = reg_state(env, regno);
5414
5415 return type_is_pkt_pointer(reg->type);
5416 }
5417
5418 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
5419 {
5420 const struct bpf_reg_state *reg = reg_state(env, regno);
5421
5422 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here.
*/ 5423 return reg->type == PTR_TO_FLOW_KEYS; 5424 } 5425 5426 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 5427 #ifdef CONFIG_NET 5428 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 5429 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5430 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 5431 #endif 5432 [CONST_PTR_TO_MAP] = btf_bpf_map_id, 5433 }; 5434 5435 static bool is_trusted_reg(const struct bpf_reg_state *reg) 5436 { 5437 /* A referenced register is always trusted. */ 5438 if (reg->ref_obj_id) 5439 return true; 5440 5441 /* Types listed in the reg2btf_ids are always trusted */ 5442 if (reg2btf_ids[base_type(reg->type)]) 5443 return true; 5444 5445 /* If a register is not referenced, it is trusted if it has the 5446 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the 5447 * other type modifiers may be safe, but we elect to take an opt-in 5448 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are 5449 * not. 5450 * 5451 * Eventually, we should make PTR_TRUSTED the single source of truth 5452 * for whether a register is trusted. 5453 */ 5454 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && 5455 !bpf_type_has_unsafe_modifiers(reg->type); 5456 } 5457 5458 static bool is_rcu_reg(const struct bpf_reg_state *reg) 5459 { 5460 return reg->type & MEM_RCU; 5461 } 5462 5463 static void clear_trusted_flags(enum bpf_type_flag *flag) 5464 { 5465 *flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU); 5466 } 5467 5468 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 5469 const struct bpf_reg_state *reg, 5470 int off, int size, bool strict) 5471 { 5472 struct tnum reg_off; 5473 int ip_align; 5474 5475 /* Byte size accesses are always allowed. */ 5476 if (!strict || size == 1) 5477 return 0; 5478 5479 /* For platforms that do not have a Kconfig enabling 5480 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 5481 * NET_IP_ALIGN is universally set to '2'. And on platforms 5482 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 5483 * to this code only in strict mode where we want to emulate 5484 * the NET_IP_ALIGN==2 checking. Therefore use an 5485 * unconditional IP align value of '2'. 5486 */ 5487 ip_align = 2; 5488 5489 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 5490 if (!tnum_is_aligned(reg_off, size)) { 5491 char tn_buf[48]; 5492 5493 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5494 verbose(env, 5495 "misaligned packet access off %d+%s+%d+%d size %d\n", 5496 ip_align, tn_buf, reg->off, off, size); 5497 return -EACCES; 5498 } 5499 5500 return 0; 5501 } 5502 5503 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 5504 const struct bpf_reg_state *reg, 5505 const char *pointer_desc, 5506 int off, int size, bool strict) 5507 { 5508 struct tnum reg_off; 5509 5510 /* Byte size accesses are always allowed. 
*/ 5511 if (!strict || size == 1) 5512 return 0; 5513 5514 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 5515 if (!tnum_is_aligned(reg_off, size)) { 5516 char tn_buf[48]; 5517 5518 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5519 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 5520 pointer_desc, tn_buf, reg->off, off, size); 5521 return -EACCES; 5522 } 5523 5524 return 0; 5525 } 5526 5527 static int check_ptr_alignment(struct bpf_verifier_env *env, 5528 const struct bpf_reg_state *reg, int off, 5529 int size, bool strict_alignment_once) 5530 { 5531 bool strict = env->strict_alignment || strict_alignment_once; 5532 const char *pointer_desc = ""; 5533 5534 switch (reg->type) { 5535 case PTR_TO_PACKET: 5536 case PTR_TO_PACKET_META: 5537 /* Special case, because of NET_IP_ALIGN. Given metadata sits 5538 * right in front, treat it the very same way. 5539 */ 5540 return check_pkt_ptr_alignment(env, reg, off, size, strict); 5541 case PTR_TO_FLOW_KEYS: 5542 pointer_desc = "flow keys "; 5543 break; 5544 case PTR_TO_MAP_KEY: 5545 pointer_desc = "key "; 5546 break; 5547 case PTR_TO_MAP_VALUE: 5548 pointer_desc = "value "; 5549 break; 5550 case PTR_TO_CTX: 5551 pointer_desc = "context "; 5552 break; 5553 case PTR_TO_STACK: 5554 pointer_desc = "stack "; 5555 /* The stack spill tracking logic in check_stack_write_fixed_off() 5556 * and check_stack_read_fixed_off() relies on stack accesses being 5557 * aligned. 5558 */ 5559 strict = true; 5560 break; 5561 case PTR_TO_SOCKET: 5562 pointer_desc = "sock "; 5563 break; 5564 case PTR_TO_SOCK_COMMON: 5565 pointer_desc = "sock_common "; 5566 break; 5567 case PTR_TO_TCP_SOCK: 5568 pointer_desc = "tcp_sock "; 5569 break; 5570 case PTR_TO_XDP_SOCK: 5571 pointer_desc = "xdp_sock "; 5572 break; 5573 default: 5574 break; 5575 } 5576 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 5577 strict); 5578 } 5579 5580 static int update_stack_depth(struct bpf_verifier_env *env, 5581 const struct bpf_func_state *func, 5582 int off) 5583 { 5584 u16 stack = env->subprog_info[func->subprogno].stack_depth; 5585 5586 if (stack >= -off) 5587 return 0; 5588 5589 /* update known max for given subprogram */ 5590 env->subprog_info[func->subprogno].stack_depth = -off; 5591 return 0; 5592 } 5593 5594 /* starting from main bpf function walk all instructions of the function 5595 * and recursively walk all callees that given function can call. 5596 * Ignore jump and exit insns. 5597 * Since recursion is prevented by check_cfg() this algorithm 5598 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 5599 */ 5600 static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) 5601 { 5602 struct bpf_subprog_info *subprog = env->subprog_info; 5603 struct bpf_insn *insn = env->prog->insnsi; 5604 int depth = 0, frame = 0, i, subprog_end; 5605 bool tail_call_reachable = false; 5606 int ret_insn[MAX_CALL_FRAMES]; 5607 int ret_prog[MAX_CALL_FRAMES]; 5608 int j; 5609 5610 i = subprog[idx].start; 5611 process_func: 5612 /* protect against potential stack overflow that might happen when 5613 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 5614 * depth for such case down to 256 so that the worst case scenario 5615 * would result in 8k stack size (32 which is tailcall limit * 256 = 5616 * 8k). 
5617 * 5618 * To get the idea what might happen, see an example: 5619 * func1 -> sub rsp, 128 5620 * subfunc1 -> sub rsp, 256 5621 * tailcall1 -> add rsp, 256 5622 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 5623 * subfunc2 -> sub rsp, 64 5624 * subfunc22 -> sub rsp, 128 5625 * tailcall2 -> add rsp, 128 5626 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 5627 * 5628 * tailcall will unwind the current stack frame but it will not get rid 5629 * of caller's stack as shown on the example above. 5630 */ 5631 if (idx && subprog[idx].has_tail_call && depth >= 256) { 5632 verbose(env, 5633 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 5634 depth); 5635 return -EACCES; 5636 } 5637 /* round up to 32-bytes, since this is granularity 5638 * of interpreter stack size 5639 */ 5640 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 5641 if (depth > MAX_BPF_STACK) { 5642 verbose(env, "combined stack size of %d calls is %d. Too large\n", 5643 frame + 1, depth); 5644 return -EACCES; 5645 } 5646 continue_func: 5647 subprog_end = subprog[idx + 1].start; 5648 for (; i < subprog_end; i++) { 5649 int next_insn, sidx; 5650 5651 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) 5652 continue; 5653 /* remember insn and function to return to */ 5654 ret_insn[frame] = i + 1; 5655 ret_prog[frame] = idx; 5656 5657 /* find the callee */ 5658 next_insn = i + insn[i].imm + 1; 5659 sidx = find_subprog(env, next_insn); 5660 if (sidx < 0) { 5661 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 5662 next_insn); 5663 return -EFAULT; 5664 } 5665 if (subprog[sidx].is_async_cb) { 5666 if (subprog[sidx].has_tail_call) { 5667 verbose(env, "verifier bug. subprog has tail_call and async cb\n"); 5668 return -EFAULT; 5669 } 5670 /* async callbacks don't increase bpf prog stack size unless called directly */ 5671 if (!bpf_pseudo_call(insn + i)) 5672 continue; 5673 } 5674 i = next_insn; 5675 idx = sidx; 5676 5677 if (subprog[idx].has_tail_call) 5678 tail_call_reachable = true; 5679 5680 frame++; 5681 if (frame >= MAX_CALL_FRAMES) { 5682 verbose(env, "the call stack of %d frames is too deep !\n", 5683 frame); 5684 return -E2BIG; 5685 } 5686 goto process_func; 5687 } 5688 /* if tail call got detected across bpf2bpf calls then mark each of the 5689 * currently present subprog frames as tail call reachable subprogs; 5690 * this info will be utilized by JIT so that we will be preserving the 5691 * tail call counter throughout bpf2bpf calls combined with tailcalls 5692 */ 5693 if (tail_call_reachable) 5694 for (j = 0; j < frame; j++) 5695 subprog[ret_prog[j]].tail_call_reachable = true; 5696 if (subprog[0].tail_call_reachable) 5697 env->prog->aux->tail_call_reachable = true; 5698 5699 /* end of for() loop means the last insn of the 'subprog' 5700 * was reached. 
Doesn't matter whether it was JA or EXIT 5701 */ 5702 if (frame == 0) 5703 return 0; 5704 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 5705 frame--; 5706 i = ret_insn[frame]; 5707 idx = ret_prog[frame]; 5708 goto continue_func; 5709 } 5710 5711 static int check_max_stack_depth(struct bpf_verifier_env *env) 5712 { 5713 struct bpf_subprog_info *si = env->subprog_info; 5714 int ret; 5715 5716 for (int i = 0; i < env->subprog_cnt; i++) { 5717 if (!i || si[i].is_async_cb) { 5718 ret = check_max_stack_depth_subprog(env, i); 5719 if (ret < 0) 5720 return ret; 5721 } 5722 continue; 5723 } 5724 return 0; 5725 } 5726 5727 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 5728 static int get_callee_stack_depth(struct bpf_verifier_env *env, 5729 const struct bpf_insn *insn, int idx) 5730 { 5731 int start = idx + insn->imm + 1, subprog; 5732 5733 subprog = find_subprog(env, start); 5734 if (subprog < 0) { 5735 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 5736 start); 5737 return -EFAULT; 5738 } 5739 return env->subprog_info[subprog].stack_depth; 5740 } 5741 #endif 5742 5743 static int __check_buffer_access(struct bpf_verifier_env *env, 5744 const char *buf_info, 5745 const struct bpf_reg_state *reg, 5746 int regno, int off, int size) 5747 { 5748 if (off < 0) { 5749 verbose(env, 5750 "R%d invalid %s buffer access: off=%d, size=%d\n", 5751 regno, buf_info, off, size); 5752 return -EACCES; 5753 } 5754 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 5755 char tn_buf[48]; 5756 5757 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5758 verbose(env, 5759 "R%d invalid variable buffer offset: off=%d, var_off=%s\n", 5760 regno, off, tn_buf); 5761 return -EACCES; 5762 } 5763 5764 return 0; 5765 } 5766 5767 static int check_tp_buffer_access(struct bpf_verifier_env *env, 5768 const struct bpf_reg_state *reg, 5769 int regno, int off, int size) 5770 { 5771 int err; 5772 5773 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); 5774 if (err) 5775 return err; 5776 5777 if (off + size > env->prog->aux->max_tp_access) 5778 env->prog->aux->max_tp_access = off + size; 5779 5780 return 0; 5781 } 5782 5783 static int check_buffer_access(struct bpf_verifier_env *env, 5784 const struct bpf_reg_state *reg, 5785 int regno, int off, int size, 5786 bool zero_size_allowed, 5787 u32 *max_access) 5788 { 5789 const char *buf_info = type_is_rdonly_mem(reg->type) ? 
"rdonly" : "rdwr"; 5790 int err; 5791 5792 err = __check_buffer_access(env, buf_info, reg, regno, off, size); 5793 if (err) 5794 return err; 5795 5796 if (off + size > *max_access) 5797 *max_access = off + size; 5798 5799 return 0; 5800 } 5801 5802 /* BPF architecture zero extends alu32 ops into 64-bit registesr */ 5803 static void zext_32_to_64(struct bpf_reg_state *reg) 5804 { 5805 reg->var_off = tnum_subreg(reg->var_off); 5806 __reg_assign_32_into_64(reg); 5807 } 5808 5809 /* truncate register to smaller size (in bytes) 5810 * must be called with size < BPF_REG_SIZE 5811 */ 5812 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 5813 { 5814 u64 mask; 5815 5816 /* clear high bits in bit representation */ 5817 reg->var_off = tnum_cast(reg->var_off, size); 5818 5819 /* fix arithmetic bounds */ 5820 mask = ((u64)1 << (size * 8)) - 1; 5821 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 5822 reg->umin_value &= mask; 5823 reg->umax_value &= mask; 5824 } else { 5825 reg->umin_value = 0; 5826 reg->umax_value = mask; 5827 } 5828 reg->smin_value = reg->umin_value; 5829 reg->smax_value = reg->umax_value; 5830 5831 /* If size is smaller than 32bit register the 32bit register 5832 * values are also truncated so we push 64-bit bounds into 5833 * 32-bit bounds. Above were truncated < 32-bits already. 5834 */ 5835 if (size >= 4) 5836 return; 5837 __reg_combine_64_into_32(reg); 5838 } 5839 5840 static void set_sext64_default_val(struct bpf_reg_state *reg, int size) 5841 { 5842 if (size == 1) { 5843 reg->smin_value = reg->s32_min_value = S8_MIN; 5844 reg->smax_value = reg->s32_max_value = S8_MAX; 5845 } else if (size == 2) { 5846 reg->smin_value = reg->s32_min_value = S16_MIN; 5847 reg->smax_value = reg->s32_max_value = S16_MAX; 5848 } else { 5849 /* size == 4 */ 5850 reg->smin_value = reg->s32_min_value = S32_MIN; 5851 reg->smax_value = reg->s32_max_value = S32_MAX; 5852 } 5853 reg->umin_value = reg->u32_min_value = 0; 5854 reg->umax_value = U64_MAX; 5855 reg->u32_max_value = U32_MAX; 5856 reg->var_off = tnum_unknown; 5857 } 5858 5859 static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size) 5860 { 5861 s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval; 5862 u64 top_smax_value, top_smin_value; 5863 u64 num_bits = size * 8; 5864 5865 if (tnum_is_const(reg->var_off)) { 5866 u64_cval = reg->var_off.value; 5867 if (size == 1) 5868 reg->var_off = tnum_const((s8)u64_cval); 5869 else if (size == 2) 5870 reg->var_off = tnum_const((s16)u64_cval); 5871 else 5872 /* size == 4 */ 5873 reg->var_off = tnum_const((s32)u64_cval); 5874 5875 u64_cval = reg->var_off.value; 5876 reg->smax_value = reg->smin_value = u64_cval; 5877 reg->umax_value = reg->umin_value = u64_cval; 5878 reg->s32_max_value = reg->s32_min_value = u64_cval; 5879 reg->u32_max_value = reg->u32_min_value = u64_cval; 5880 return; 5881 } 5882 5883 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; 5884 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; 5885 5886 if (top_smax_value != top_smin_value) 5887 goto out; 5888 5889 /* find the s64_min and s64_min after sign extension */ 5890 if (size == 1) { 5891 init_s64_max = (s8)reg->smax_value; 5892 init_s64_min = (s8)reg->smin_value; 5893 } else if (size == 2) { 5894 init_s64_max = (s16)reg->smax_value; 5895 init_s64_min = (s16)reg->smin_value; 5896 } else { 5897 init_s64_max = (s32)reg->smax_value; 5898 init_s64_min = (s32)reg->smin_value; 5899 } 5900 5901 s64_max = max(init_s64_max, init_s64_min); 5902 s64_min = 
min(init_s64_max, init_s64_min);
5903
5904 /* both of s64_max/s64_min positive or negative */
5905 if ((s64_max >= 0) == (s64_min >= 0)) {
5906 reg->smin_value = reg->s32_min_value = s64_min;
5907 reg->smax_value = reg->s32_max_value = s64_max;
5908 reg->umin_value = reg->u32_min_value = s64_min;
5909 reg->umax_value = reg->u32_max_value = s64_max;
5910 reg->var_off = tnum_range(s64_min, s64_max);
5911 return;
5912 }
5913
5914 out:
5915 set_sext64_default_val(reg, size);
5916 }
5917
5918 static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
5919 {
5920 if (size == 1) {
5921 reg->s32_min_value = S8_MIN;
5922 reg->s32_max_value = S8_MAX;
5923 } else {
5924 /* size == 2 */
5925 reg->s32_min_value = S16_MIN;
5926 reg->s32_max_value = S16_MAX;
5927 }
5928 reg->u32_min_value = 0;
5929 reg->u32_max_value = U32_MAX;
5930 }
5931
5932 static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
5933 {
5934 s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val;
5935 u32 top_smax_value, top_smin_value;
5936 u32 num_bits = size * 8;
5937
5938 if (tnum_is_const(reg->var_off)) {
5939 u32_val = reg->var_off.value;
5940 if (size == 1)
5941 reg->var_off = tnum_const((s8)u32_val);
5942 else
5943 reg->var_off = tnum_const((s16)u32_val);
5944
5945 u32_val = reg->var_off.value;
5946 reg->s32_min_value = reg->s32_max_value = u32_val;
5947 reg->u32_min_value = reg->u32_max_value = u32_val;
5948 return;
5949 }
5950
5951 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits;
5952 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits;
5953
5954 if (top_smax_value != top_smin_value)
5955 goto out;
5956
5957 /* find the s32_max and s32_min after sign extension */
5958 if (size == 1) {
5959 init_s32_max = (s8)reg->s32_max_value;
5960 init_s32_min = (s8)reg->s32_min_value;
5961 } else {
5962 /* size == 2 */
5963 init_s32_max = (s16)reg->s32_max_value;
5964 init_s32_min = (s16)reg->s32_min_value;
5965 }
5966 s32_max = max(init_s32_max, init_s32_min);
5967 s32_min = min(init_s32_max, init_s32_min);
5968
5969 if ((s32_min >= 0) == (s32_max >= 0)) {
5970 reg->s32_min_value = s32_min;
5971 reg->s32_max_value = s32_max;
5972 reg->u32_min_value = (u32)s32_min;
5973 reg->u32_max_value = (u32)s32_max;
5974 return;
5975 }
5976
5977 out:
5978 set_sext32_default_val(reg, size);
5979 }
5980
5981 static bool bpf_map_is_rdonly(const struct bpf_map *map)
5982 {
5983 /* A map is considered read-only if the following conditions are true:
5984 *
5985 * 1) BPF program side cannot change any of the map content. The
5986 * BPF_F_RDONLY_PROG flag is set at map creation time and persists
5987 * throughout the lifetime of a map.
5988 * 2) The map value(s) have been initialized from user space by a
5989 * loader and then "frozen", such that no new map update/delete
5990 * operations from syscall side are possible for the rest of
5991 * the map's lifetime from that point onwards.
5992 * 3) Any parallel/pending map update/delete operations from syscall
5993 * side have been completed. Only after that point, it's safe to
5994 * assume that map value(s) are immutable.
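 * An illustrative sequence that satisfies all three conditions: a loader
 * creates the map with BPF_F_RDONLY_PROG, fills it with bpf_map_update_elem()
 * and then issues BPF_MAP_FREEZE (this is, e.g., how libbpf handles .rodata);
 * once in-flight syscall-side writers have drained, loads from such a map can
 * be tracked as known scalars below.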
5995 */ 5996 return (map->map_flags & BPF_F_RDONLY_PROG) && 5997 READ_ONCE(map->frozen) && 5998 !bpf_map_write_active(map); 5999 } 6000 6001 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val, 6002 bool is_ldsx) 6003 { 6004 void *ptr; 6005 u64 addr; 6006 int err; 6007 6008 err = map->ops->map_direct_value_addr(map, &addr, off); 6009 if (err) 6010 return err; 6011 ptr = (void *)(long)addr + off; 6012 6013 switch (size) { 6014 case sizeof(u8): 6015 *val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr; 6016 break; 6017 case sizeof(u16): 6018 *val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr; 6019 break; 6020 case sizeof(u32): 6021 *val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr; 6022 break; 6023 case sizeof(u64): 6024 *val = *(u64 *)ptr; 6025 break; 6026 default: 6027 return -EINVAL; 6028 } 6029 return 0; 6030 } 6031 6032 #define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu) 6033 #define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null) 6034 #define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted) 6035 6036 /* 6037 * Allow list few fields as RCU trusted or full trusted. 6038 * This logic doesn't allow mix tagging and will be removed once GCC supports 6039 * btf_type_tag. 6040 */ 6041 6042 /* RCU trusted: these fields are trusted in RCU CS and never NULL */ 6043 BTF_TYPE_SAFE_RCU(struct task_struct) { 6044 const cpumask_t *cpus_ptr; 6045 struct css_set __rcu *cgroups; 6046 struct task_struct __rcu *real_parent; 6047 struct task_struct *group_leader; 6048 }; 6049 6050 BTF_TYPE_SAFE_RCU(struct cgroup) { 6051 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ 6052 struct kernfs_node *kn; 6053 }; 6054 6055 BTF_TYPE_SAFE_RCU(struct css_set) { 6056 struct cgroup *dfl_cgrp; 6057 }; 6058 6059 /* RCU trusted: these fields are trusted in RCU CS and can be NULL */ 6060 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) { 6061 struct file __rcu *exe_file; 6062 }; 6063 6064 /* skb->sk, req->sk are not RCU protected, but we mark them as such 6065 * because bpf prog accessible sockets are SOCK_RCU_FREE. 
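 * (SOCK_RCU_FREE sockets are only freed after an RCU grace period, so a
 * program dereferencing skb->sk from within an RCU read-side section cannot
 * see the socket disappear underneath it.)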
6066 */ 6067 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) { 6068 struct sock *sk; 6069 }; 6070 6071 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) { 6072 struct sock *sk; 6073 }; 6074 6075 /* full trusted: these fields are trusted even outside of RCU CS and never NULL */ 6076 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) { 6077 struct seq_file *seq; 6078 }; 6079 6080 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) { 6081 struct bpf_iter_meta *meta; 6082 struct task_struct *task; 6083 }; 6084 6085 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) { 6086 struct file *file; 6087 }; 6088 6089 BTF_TYPE_SAFE_TRUSTED(struct file) { 6090 struct inode *f_inode; 6091 }; 6092 6093 BTF_TYPE_SAFE_TRUSTED(struct dentry) { 6094 /* no negative dentry-s in places where bpf can see it */ 6095 struct inode *d_inode; 6096 }; 6097 6098 BTF_TYPE_SAFE_TRUSTED(struct socket) { 6099 struct sock *sk; 6100 }; 6101 6102 static bool type_is_rcu(struct bpf_verifier_env *env, 6103 struct bpf_reg_state *reg, 6104 const char *field_name, u32 btf_id) 6105 { 6106 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct)); 6107 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup)); 6108 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set)); 6109 6110 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); 6111 } 6112 6113 static bool type_is_rcu_or_null(struct bpf_verifier_env *env, 6114 struct bpf_reg_state *reg, 6115 const char *field_name, u32 btf_id) 6116 { 6117 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct)); 6118 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff)); 6119 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock)); 6120 6121 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); 6122 } 6123 6124 static bool type_is_trusted(struct bpf_verifier_env *env, 6125 struct bpf_reg_state *reg, 6126 const char *field_name, u32 btf_id) 6127 { 6128 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta)); 6129 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task)); 6130 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm)); 6131 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file)); 6132 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry)); 6133 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket)); 6134 6135 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); 6136 } 6137 6138 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 6139 struct bpf_reg_state *regs, 6140 int regno, int off, int size, 6141 enum bpf_access_type atype, 6142 int value_regno) 6143 { 6144 struct bpf_reg_state *reg = regs + regno; 6145 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 6146 const char *tname = btf_name_by_offset(reg->btf, t->name_off); 6147 const char *field_name = NULL; 6148 enum bpf_type_flag flag = 0; 6149 u32 btf_id = 0; 6150 int ret; 6151 6152 if (!env->allow_ptr_leaks) { 6153 verbose(env, 6154 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 6155 tname); 6156 return -EPERM; 6157 } 6158 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { 6159 verbose(env, 6160 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", 6161 tname); 6162 return -EINVAL; 6163 } 6164 if (off < 0) { 6165 verbose(env, 6166 "R%d is ptr_%s invalid negative access: off=%d\n", 6167 regno, tname, off); 6168 return -EACCES; 6169 } 6170 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 6171 char tn_buf[48]; 6172 6173 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6174 verbose(env, 
6175 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 6176 regno, tname, off, tn_buf); 6177 return -EACCES; 6178 } 6179 6180 if (reg->type & MEM_USER) { 6181 verbose(env, 6182 "R%d is ptr_%s access user memory: off=%d\n", 6183 regno, tname, off); 6184 return -EACCES; 6185 } 6186 6187 if (reg->type & MEM_PERCPU) { 6188 verbose(env, 6189 "R%d is ptr_%s access percpu memory: off=%d\n", 6190 regno, tname, off); 6191 return -EACCES; 6192 } 6193 6194 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { 6195 if (!btf_is_kernel(reg->btf)) { 6196 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); 6197 return -EFAULT; 6198 } 6199 ret = env->ops->btf_struct_access(&env->log, reg, off, size); 6200 } else { 6201 /* Writes are permitted with default btf_struct_access for 6202 * program allocated objects (which always have ref_obj_id > 0), 6203 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC. 6204 */ 6205 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { 6206 verbose(env, "only read is supported\n"); 6207 return -EACCES; 6208 } 6209 6210 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && 6211 !reg->ref_obj_id) { 6212 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); 6213 return -EFAULT; 6214 } 6215 6216 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); 6217 } 6218 6219 if (ret < 0) 6220 return ret; 6221 6222 if (ret != PTR_TO_BTF_ID) { 6223 /* just mark; */ 6224 6225 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { 6226 /* If this is an untrusted pointer, all pointers formed by walking it 6227 * also inherit the untrusted flag. 6228 */ 6229 flag = PTR_UNTRUSTED; 6230 6231 } else if (is_trusted_reg(reg) || is_rcu_reg(reg)) { 6232 /* By default any pointer obtained from walking a trusted pointer is no 6233 * longer trusted, unless the field being accessed has explicitly been 6234 * marked as inheriting its parent's state of trust (either full or RCU). 6235 * For example: 6236 * 'cgroups' pointer is untrusted if task->cgroups dereference 6237 * happened in a sleepable program outside of bpf_rcu_read_lock() 6238 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). 6239 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED. 6240 * 6241 * A regular RCU-protected pointer with __rcu tag can also be deemed 6242 * trusted if we are in an RCU CS. Such pointer can be NULL. 
6243 */ 6244 if (type_is_trusted(env, reg, field_name, btf_id)) { 6245 flag |= PTR_TRUSTED; 6246 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { 6247 if (type_is_rcu(env, reg, field_name, btf_id)) { 6248 /* ignore __rcu tag and mark it MEM_RCU */ 6249 flag |= MEM_RCU; 6250 } else if (flag & MEM_RCU || 6251 type_is_rcu_or_null(env, reg, field_name, btf_id)) { 6252 /* __rcu tagged pointers can be NULL */ 6253 flag |= MEM_RCU | PTR_MAYBE_NULL; 6254 6255 /* We always trust them */ 6256 if (type_is_rcu_or_null(env, reg, field_name, btf_id) && 6257 flag & PTR_UNTRUSTED) 6258 flag &= ~PTR_UNTRUSTED; 6259 } else if (flag & (MEM_PERCPU | MEM_USER)) { 6260 /* keep as-is */ 6261 } else { 6262 /* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */ 6263 clear_trusted_flags(&flag); 6264 } 6265 } else { 6266 /* 6267 * If not in RCU CS or MEM_RCU pointer can be NULL then 6268 * aggressively mark as untrusted otherwise such 6269 * pointers will be plain PTR_TO_BTF_ID without flags 6270 * and will be allowed to be passed into helpers for 6271 * compat reasons. 6272 */ 6273 flag = PTR_UNTRUSTED; 6274 } 6275 } else { 6276 /* Old compat. Deprecated */ 6277 clear_trusted_flags(&flag); 6278 } 6279 6280 if (atype == BPF_READ && value_regno >= 0) 6281 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); 6282 6283 return 0; 6284 } 6285 6286 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 6287 struct bpf_reg_state *regs, 6288 int regno, int off, int size, 6289 enum bpf_access_type atype, 6290 int value_regno) 6291 { 6292 struct bpf_reg_state *reg = regs + regno; 6293 struct bpf_map *map = reg->map_ptr; 6294 struct bpf_reg_state map_reg; 6295 enum bpf_type_flag flag = 0; 6296 const struct btf_type *t; 6297 const char *tname; 6298 u32 btf_id; 6299 int ret; 6300 6301 if (!btf_vmlinux) { 6302 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 6303 return -ENOTSUPP; 6304 } 6305 6306 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 6307 verbose(env, "map_ptr access not supported for map type %d\n", 6308 map->map_type); 6309 return -ENOTSUPP; 6310 } 6311 6312 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 6313 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 6314 6315 if (!env->allow_ptr_leaks) { 6316 verbose(env, 6317 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 6318 tname); 6319 return -EPERM; 6320 } 6321 6322 if (off < 0) { 6323 verbose(env, "R%d is %s invalid negative access: off=%d\n", 6324 regno, tname, off); 6325 return -EACCES; 6326 } 6327 6328 if (atype != BPF_READ) { 6329 verbose(env, "only read from %s is supported\n", tname); 6330 return -EACCES; 6331 } 6332 6333 /* Simulate access to a PTR_TO_BTF_ID */ 6334 memset(&map_reg, 0, sizeof(map_reg)); 6335 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); 6336 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); 6337 if (ret < 0) 6338 return ret; 6339 6340 if (value_regno >= 0) 6341 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); 6342 6343 return 0; 6344 } 6345 6346 /* Check that the stack access at the given offset is within bounds. The 6347 * maximum valid offset is -1. 6348 * 6349 * The minimum valid offset is -MAX_BPF_STACK for writes, and 6350 * -state->allocated_stack for reads. 
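 * For example, with MAX_BPF_STACK == 512 a write at off == -8 is accepted,
 * while writes at off == 0 or off == -520 are rejected; reads are further
 * limited to the stack this frame has actually allocated so far.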
6351 */ 6352 static int check_stack_slot_within_bounds(int off, 6353 struct bpf_func_state *state, 6354 enum bpf_access_type t) 6355 { 6356 int min_valid_off; 6357 6358 if (t == BPF_WRITE) 6359 min_valid_off = -MAX_BPF_STACK; 6360 else 6361 min_valid_off = -state->allocated_stack; 6362 6363 if (off < min_valid_off || off > -1) 6364 return -EACCES; 6365 return 0; 6366 } 6367 6368 /* Check that the stack access at 'regno + off' falls within the maximum stack 6369 * bounds. 6370 * 6371 * 'off' includes `regno->offset`, but not its dynamic part (if any). 6372 */ 6373 static int check_stack_access_within_bounds( 6374 struct bpf_verifier_env *env, 6375 int regno, int off, int access_size, 6376 enum bpf_access_src src, enum bpf_access_type type) 6377 { 6378 struct bpf_reg_state *regs = cur_regs(env); 6379 struct bpf_reg_state *reg = regs + regno; 6380 struct bpf_func_state *state = func(env, reg); 6381 int min_off, max_off; 6382 int err; 6383 char *err_extra; 6384 6385 if (src == ACCESS_HELPER) 6386 /* We don't know if helpers are reading or writing (or both). */ 6387 err_extra = " indirect access to"; 6388 else if (type == BPF_READ) 6389 err_extra = " read from"; 6390 else 6391 err_extra = " write to"; 6392 6393 if (tnum_is_const(reg->var_off)) { 6394 min_off = reg->var_off.value + off; 6395 if (access_size > 0) 6396 max_off = min_off + access_size - 1; 6397 else 6398 max_off = min_off; 6399 } else { 6400 if (reg->smax_value >= BPF_MAX_VAR_OFF || 6401 reg->smin_value <= -BPF_MAX_VAR_OFF) { 6402 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", 6403 err_extra, regno); 6404 return -EACCES; 6405 } 6406 min_off = reg->smin_value + off; 6407 if (access_size > 0) 6408 max_off = reg->smax_value + off + access_size - 1; 6409 else 6410 max_off = min_off; 6411 } 6412 6413 err = check_stack_slot_within_bounds(min_off, state, type); 6414 if (!err) 6415 err = check_stack_slot_within_bounds(max_off, state, type); 6416 6417 if (err) { 6418 if (tnum_is_const(reg->var_off)) { 6419 verbose(env, "invalid%s stack R%d off=%d size=%d\n", 6420 err_extra, regno, off, access_size); 6421 } else { 6422 char tn_buf[48]; 6423 6424 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6425 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", 6426 err_extra, regno, tn_buf, access_size); 6427 } 6428 } 6429 return err; 6430 } 6431 6432 /* check whether memory at (regno + off) is accessible for t = (read | write) 6433 * if t==write, value_regno is a register which value is stored into memory 6434 * if t==read, value_regno is a register which will receive the value from memory 6435 * if t==write && value_regno==-1, some unknown value is stored into memory 6436 * if t==read && value_regno==-1, don't care what we read from memory 6437 */ 6438 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 6439 int off, int bpf_size, enum bpf_access_type t, 6440 int value_regno, bool strict_alignment_once, bool is_ldsx) 6441 { 6442 struct bpf_reg_state *regs = cur_regs(env); 6443 struct bpf_reg_state *reg = regs + regno; 6444 struct bpf_func_state *state; 6445 int size, err = 0; 6446 6447 size = bpf_size_to_bytes(bpf_size); 6448 if (size < 0) 6449 return size; 6450 6451 /* alignment checks will add in reg->off themselves */ 6452 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 6453 if (err) 6454 return err; 6455 6456 /* for access checks, reg->off is just part of off */ 6457 off += reg->off; 6458 6459 if (reg->type == PTR_TO_MAP_KEY) { 6460 if (t == BPF_WRITE) { 
6461 verbose(env, "write to change key R%d not allowed\n", regno);
6462 return -EACCES;
6463 }
6464
6465 err = check_mem_region_access(env, regno, off, size,
6466 reg->map_ptr->key_size, false);
6467 if (err)
6468 return err;
6469 if (value_regno >= 0)
6470 mark_reg_unknown(env, regs, value_regno);
6471 } else if (reg->type == PTR_TO_MAP_VALUE) {
6472 struct btf_field *kptr_field = NULL;
6473
6474 if (t == BPF_WRITE && value_regno >= 0 &&
6475 is_pointer_value(env, value_regno)) {
6476 verbose(env, "R%d leaks addr into map\n", value_regno);
6477 return -EACCES;
6478 }
6479 err = check_map_access_type(env, regno, off, size, t);
6480 if (err)
6481 return err;
6482 err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
6483 if (err)
6484 return err;
6485 if (tnum_is_const(reg->var_off))
6486 kptr_field = btf_record_find(reg->map_ptr->record,
6487 off + reg->var_off.value, BPF_KPTR);
6488 if (kptr_field) {
6489 err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
6490 } else if (t == BPF_READ && value_regno >= 0) {
6491 struct bpf_map *map = reg->map_ptr;
6492
6493 /* if map is read-only, track its contents as scalars */
6494 if (tnum_is_const(reg->var_off) &&
6495 bpf_map_is_rdonly(map) &&
6496 map->ops->map_direct_value_addr) {
6497 int map_off = off + reg->var_off.value;
6498 u64 val = 0;
6499
6500 err = bpf_map_direct_read(map, map_off, size,
6501 &val, is_ldsx);
6502 if (err)
6503 return err;
6504
6505 regs[value_regno].type = SCALAR_VALUE;
6506 __mark_reg_known(&regs[value_regno], val);
6507 } else {
6508 mark_reg_unknown(env, regs, value_regno);
6509 }
6510 }
6511 } else if (base_type(reg->type) == PTR_TO_MEM) {
6512 bool rdonly_mem = type_is_rdonly_mem(reg->type);
6513
6514 if (type_may_be_null(reg->type)) {
6515 verbose(env, "R%d invalid mem access '%s'\n", regno,
6516 reg_type_str(env, reg->type));
6517 return -EACCES;
6518 }
6519
6520 if (t == BPF_WRITE && rdonly_mem) {
6521 verbose(env, "R%d cannot write into %s\n",
6522 regno, reg_type_str(env, reg->type));
6523 return -EACCES;
6524 }
6525
6526 if (t == BPF_WRITE && value_regno >= 0 &&
6527 is_pointer_value(env, value_regno)) {
6528 verbose(env, "R%d leaks addr into mem\n", value_regno);
6529 return -EACCES;
6530 }
6531
6532 err = check_mem_region_access(env, regno, off, size,
6533 reg->mem_size, false);
6534 if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
6535 mark_reg_unknown(env, regs, value_regno);
6536 } else if (reg->type == PTR_TO_CTX) {
6537 enum bpf_reg_type reg_type = SCALAR_VALUE;
6538 struct btf *btf = NULL;
6539 u32 btf_id = 0;
6540
6541 if (t == BPF_WRITE && value_regno >= 0 &&
6542 is_pointer_value(env, value_regno)) {
6543 verbose(env, "R%d leaks addr into ctx\n", value_regno);
6544 return -EACCES;
6545 }
6546
6547 err = check_ptr_off_reg(env, reg, regno);
6548 if (err < 0)
6549 return err;
6550
6551 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
6552 &btf_id);
6553 if (err)
6554 verbose_linfo(env, insn_idx, "; ");
6555 if (!err && t == BPF_READ && value_regno >= 0) {
6556 /* ctx access returns either a scalar, or a
6557 * PTR_TO_PACKET[_META,_END]. In the latter
6558 * case, we know the offset is zero.
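 * E.g. (illustrative) a load of __sk_buff->data yields PTR_TO_PACKET and a
 * load of __sk_buff->data_end yields PTR_TO_PACKET_END, both with offset 0,
 * while a load of __sk_buff->mark stays a SCALAR_VALUE.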
6559 */ 6560 if (reg_type == SCALAR_VALUE) { 6561 mark_reg_unknown(env, regs, value_regno); 6562 } else { 6563 mark_reg_known_zero(env, regs, 6564 value_regno); 6565 if (type_may_be_null(reg_type)) 6566 regs[value_regno].id = ++env->id_gen; 6567 /* A load of ctx field could have different 6568 * actual load size with the one encoded in the 6569 * insn. When the dst is PTR, it is for sure not 6570 * a sub-register. 6571 */ 6572 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 6573 if (base_type(reg_type) == PTR_TO_BTF_ID) { 6574 regs[value_regno].btf = btf; 6575 regs[value_regno].btf_id = btf_id; 6576 } 6577 } 6578 regs[value_regno].type = reg_type; 6579 } 6580 6581 } else if (reg->type == PTR_TO_STACK) { 6582 /* Basic bounds checks. */ 6583 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); 6584 if (err) 6585 return err; 6586 6587 state = func(env, reg); 6588 err = update_stack_depth(env, state, off); 6589 if (err) 6590 return err; 6591 6592 if (t == BPF_READ) 6593 err = check_stack_read(env, regno, off, size, 6594 value_regno); 6595 else 6596 err = check_stack_write(env, regno, off, size, 6597 value_regno, insn_idx); 6598 } else if (reg_is_pkt_pointer(reg)) { 6599 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 6600 verbose(env, "cannot write into packet\n"); 6601 return -EACCES; 6602 } 6603 if (t == BPF_WRITE && value_regno >= 0 && 6604 is_pointer_value(env, value_regno)) { 6605 verbose(env, "R%d leaks addr into packet\n", 6606 value_regno); 6607 return -EACCES; 6608 } 6609 err = check_packet_access(env, regno, off, size, false); 6610 if (!err && t == BPF_READ && value_regno >= 0) 6611 mark_reg_unknown(env, regs, value_regno); 6612 } else if (reg->type == PTR_TO_FLOW_KEYS) { 6613 if (t == BPF_WRITE && value_regno >= 0 && 6614 is_pointer_value(env, value_regno)) { 6615 verbose(env, "R%d leaks addr into flow keys\n", 6616 value_regno); 6617 return -EACCES; 6618 } 6619 6620 err = check_flow_keys_access(env, off, size); 6621 if (!err && t == BPF_READ && value_regno >= 0) 6622 mark_reg_unknown(env, regs, value_regno); 6623 } else if (type_is_sk_pointer(reg->type)) { 6624 if (t == BPF_WRITE) { 6625 verbose(env, "R%d cannot write into %s\n", 6626 regno, reg_type_str(env, reg->type)); 6627 return -EACCES; 6628 } 6629 err = check_sock_access(env, insn_idx, regno, off, size, t); 6630 if (!err && value_regno >= 0) 6631 mark_reg_unknown(env, regs, value_regno); 6632 } else if (reg->type == PTR_TO_TP_BUFFER) { 6633 err = check_tp_buffer_access(env, reg, regno, off, size); 6634 if (!err && t == BPF_READ && value_regno >= 0) 6635 mark_reg_unknown(env, regs, value_regno); 6636 } else if (base_type(reg->type) == PTR_TO_BTF_ID && 6637 !type_may_be_null(reg->type)) { 6638 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 6639 value_regno); 6640 } else if (reg->type == CONST_PTR_TO_MAP) { 6641 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 6642 value_regno); 6643 } else if (base_type(reg->type) == PTR_TO_BUF) { 6644 bool rdonly_mem = type_is_rdonly_mem(reg->type); 6645 u32 *max_access; 6646 6647 if (rdonly_mem) { 6648 if (t == BPF_WRITE) { 6649 verbose(env, "R%d cannot write into %s\n", 6650 regno, reg_type_str(env, reg->type)); 6651 return -EACCES; 6652 } 6653 max_access = &env->prog->aux->max_rdonly_access; 6654 } else { 6655 max_access = &env->prog->aux->max_rdwr_access; 6656 } 6657 6658 err = check_buffer_access(env, reg, regno, off, size, false, 6659 max_access); 6660 6661 if (!err && value_regno >= 0 && (rdonly_mem || t == 
BPF_READ))
6662 mark_reg_unknown(env, regs, value_regno);
6663 } else {
6664 verbose(env, "R%d invalid mem access '%s'\n", regno,
6665 reg_type_str(env, reg->type));
6666 return -EACCES;
6667 }
6668
6669 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
6670 regs[value_regno].type == SCALAR_VALUE) {
6671 if (!is_ldsx)
6672 /* b/h/w load zero-extends, mark upper bits as known 0 */
6673 coerce_reg_to_size(&regs[value_regno], size);
6674 else
6675 coerce_reg_to_size_sx(&regs[value_regno], size);
6676 }
6677 return err;
6678 }
6679
6680 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
6681 {
6682 int load_reg;
6683 int err;
6684
6685 switch (insn->imm) {
6686 case BPF_ADD:
6687 case BPF_ADD | BPF_FETCH:
6688 case BPF_AND:
6689 case BPF_AND | BPF_FETCH:
6690 case BPF_OR:
6691 case BPF_OR | BPF_FETCH:
6692 case BPF_XOR:
6693 case BPF_XOR | BPF_FETCH:
6694 case BPF_XCHG:
6695 case BPF_CMPXCHG:
6696 break;
6697 default:
6698 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
6699 return -EINVAL;
6700 }
6701
6702 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
6703 verbose(env, "invalid atomic operand size\n");
6704 return -EINVAL;
6705 }
6706
6707 /* check src1 operand */
6708 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6709 if (err)
6710 return err;
6711
6712 /* check src2 operand */
6713 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6714 if (err)
6715 return err;
6716
6717 if (insn->imm == BPF_CMPXCHG) {
6718 /* Check comparison of R0 with memory location */
6719 const u32 aux_reg = BPF_REG_0;
6720
6721 err = check_reg_arg(env, aux_reg, SRC_OP);
6722 if (err)
6723 return err;
6724
6725 if (is_pointer_value(env, aux_reg)) {
6726 verbose(env, "R%d leaks addr into mem\n", aux_reg);
6727 return -EACCES;
6728 }
6729 }
6730
6731 if (is_pointer_value(env, insn->src_reg)) {
6732 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6733 return -EACCES;
6734 }
6735
6736 if (is_ctx_reg(env, insn->dst_reg) ||
6737 is_pkt_reg(env, insn->dst_reg) ||
6738 is_flow_key_reg(env, insn->dst_reg) ||
6739 is_sk_reg(env, insn->dst_reg)) {
6740 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
6741 insn->dst_reg,
6742 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
6743 return -EACCES;
6744 }
6745
6746 if (insn->imm & BPF_FETCH) {
6747 if (insn->imm == BPF_CMPXCHG)
6748 load_reg = BPF_REG_0;
6749 else
6750 load_reg = insn->src_reg;
6751
6752 /* check and record load of old value */
6753 err = check_reg_arg(env, load_reg, DST_OP);
6754 if (err)
6755 return err;
6756 } else {
6757 /* This instruction accesses a memory location but doesn't
6758 * actually load it into a register.
6759 */
6760 load_reg = -1;
6761 }
6762
6763 /* Check whether we can read the memory, with second call for fetch
6764 * case to simulate the register fill.
6765 */
6766 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6767 BPF_SIZE(insn->code), BPF_READ, -1, true, false);
6768 if (!err && load_reg >= 0)
6769 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6770 BPF_SIZE(insn->code), BPF_READ, load_reg,
6771 true, false);
6772 if (err)
6773 return err;
6774
6775 /* Check whether we can write into the same memory.
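 * For BPF_CMPXCHG the store only happens when the comparison against R0
 * succeeds, but writability is conservatively required either way.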
 */
6776 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6777 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
6778 if (err)
6779 return err;
6780
6781 return 0;
6782 }
6783
6784 /* When register 'regno' is used to read the stack (either directly or through
6785 * a helper function) make sure that it's within stack boundary and, depending
6786 * on the access type, that all elements of the stack are initialized.
6787 *
6788 * 'off' includes 'regno->off', but not its dynamic part (if any).
6789 *
6790 * All registers that have been spilled on the stack in the slots within the
6791 * read offsets are marked as read.
6792 */
6793 static int check_stack_range_initialized(
6794 struct bpf_verifier_env *env, int regno, int off,
6795 int access_size, bool zero_size_allowed,
6796 enum bpf_access_src type, struct bpf_call_arg_meta *meta)
6797 {
6798 struct bpf_reg_state *reg = reg_state(env, regno);
6799 struct bpf_func_state *state = func(env, reg);
6800 int err, min_off, max_off, i, j, slot, spi;
6801 char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
6802 enum bpf_access_type bounds_check_type;
6803 /* Some accesses can write anything into the stack, others are
6804 * read-only.
6805 */
6806 bool clobber = false;
6807
6808 if (access_size == 0 && !zero_size_allowed) {
6809 verbose(env, "invalid zero-sized read\n");
6810 return -EACCES;
6811 }
6812
6813 if (type == ACCESS_HELPER) {
6814 /* The bounds checks for writes are more permissive than for
6815 * reads. However, if raw_mode is not set, we'll do extra
6816 * checks below.
6817 */
6818 bounds_check_type = BPF_WRITE;
6819 clobber = true;
6820 } else {
6821 bounds_check_type = BPF_READ;
6822 }
6823 err = check_stack_access_within_bounds(env, regno, off, access_size,
6824 type, bounds_check_type);
6825 if (err)
6826 return err;
6827
6828
6829 if (tnum_is_const(reg->var_off)) {
6830 min_off = max_off = reg->var_off.value + off;
6831 } else {
6832 /* Variable offset is prohibited for unprivileged mode for
6833 * simplicity since it requires corresponding support in
6834 * Spectre masking for stack ALU.
6835 * See also retrieve_ptr_limit().
6836 */
6837 if (!env->bypass_spec_v1) {
6838 char tn_buf[48];
6839
6840 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6841 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
6842 regno, err_extra, tn_buf);
6843 return -EACCES;
6844 }
6845 /* Only initialized buffer on stack is allowed to be accessed
6846 * with variable offset. With uninitialized buffer it's hard to
6847 * guarantee that whole memory is marked as initialized on
6848 * helper return since specific bounds are unknown, which may
6849 * cause uninitialized stack leaking.
6850 */
6851 if (meta && meta->raw_mode)
6852 meta = NULL;
6853
6854 min_off = reg->smin_value + off;
6855 max_off = reg->smax_value + off;
6856 }
6857
6858 if (meta && meta->raw_mode) {
6859 /* Ensure we won't be overwriting dynptrs when simulating byte
6860 * by byte access in check_helper_call using meta.access_size.
6861 * This would be a problem if we have a helper in the future
6862 * which takes:
6863 *
6864 * helper(uninit_mem, len, dynptr)
6865 *
6866 * Now, uninit_mem may overlap with dynptr pointer. Hence, it
6867 * may end up writing to dynptr itself when touching memory from
6868 * arg 1. This can be relaxed on a case by case basis for known
6869 * safe cases, but reject due to the possibility of aliasing by
6870 * default.
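 * A hypothetical layout the loop below would reject: a dynptr spilled at
 * fp-16..fp-1 and uninit_mem == fp-32 with len == 32, where the byte range
 * written by the helper covers the STACK_DYNPTR slots.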
6871 */
6872 for (i = min_off; i < max_off + access_size; i++) {
6873 int stack_off = -i - 1;
6874
6875 spi = __get_spi(i);
6876 /* raw_mode may write past allocated_stack */
6877 if (state->allocated_stack <= stack_off)
6878 continue;
6879 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
6880 verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
6881 return -EACCES;
6882 }
6883 }
6884 meta->access_size = access_size;
6885 meta->regno = regno;
6886 return 0;
6887 }
6888
6889 for (i = min_off; i < max_off + access_size; i++) {
6890 u8 *stype;
6891
6892 slot = -i - 1;
6893 spi = slot / BPF_REG_SIZE;
6894 if (state->allocated_stack <= slot)
6895 goto err;
6896 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
6897 if (*stype == STACK_MISC)
6898 goto mark;
6899 if ((*stype == STACK_ZERO) ||
6900 (*stype == STACK_INVALID && env->allow_uninit_stack)) {
6901 if (clobber) {
6902 /* helper can write anything into the stack */
6903 *stype = STACK_MISC;
6904 }
6905 goto mark;
6906 }
6907
6908 if (is_spilled_reg(&state->stack[spi]) &&
6909 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
6910 env->allow_ptr_leaks)) {
6911 if (clobber) {
6912 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
6913 for (j = 0; j < BPF_REG_SIZE; j++)
6914 scrub_spilled_slot(&state->stack[spi].slot_type[j]);
6915 }
6916 goto mark;
6917 }
6918
6919 err:
6920 if (tnum_is_const(reg->var_off)) {
6921 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
6922 err_extra, regno, min_off, i - min_off, access_size);
6923 } else {
6924 char tn_buf[48];
6925
6926 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6927 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
6928 err_extra, regno, tn_buf, i - min_off, access_size);
6929 }
6930 return -EACCES;
6931 mark:
6932 /* reading any byte out of 8-byte 'spill_slot' will cause
6933 * the whole slot to be marked as 'read'
6934 */
6935 mark_reg_read(env, &state->stack[spi].spilled_ptr,
6936 state->stack[spi].spilled_ptr.parent,
6937 REG_LIVE_READ64);
6938 /* We do not set REG_LIVE_WRITTEN for stack slot, as we cannot
6939 * be sure whether the stack slot is written to or not. Hence,
6940 * we must still conservatively propagate reads upwards even if
6941 * helper may write to the entire memory range.
6942 */
6943 }
6944 return update_stack_depth(env, state, min_off);
6945 }
6946
6947 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
6948 int access_size, bool zero_size_allowed,
6949 struct bpf_call_arg_meta *meta)
6950 {
6951 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6952 u32 *max_access;
6953
6954 switch (base_type(reg->type)) {
6955 case PTR_TO_PACKET:
6956 case PTR_TO_PACKET_META:
6957 return check_packet_access(env, regno, reg->off, access_size,
6958 zero_size_allowed);
6959 case PTR_TO_MAP_KEY:
6960 if (meta && meta->raw_mode) {
6961 verbose(env, "R%d cannot write into %s\n", regno,
6962 reg_type_str(env, reg->type));
6963 return -EACCES;
6964 }
6965 return check_mem_region_access(env, regno, reg->off, access_size,
6966 reg->map_ptr->key_size, false);
6967 case PTR_TO_MAP_VALUE:
6968 if (check_map_access_type(env, regno, reg->off, access_size,
6969 meta && meta->raw_mode ?
BPF_WRITE : 6970 BPF_READ)) 6971 return -EACCES; 6972 return check_map_access(env, regno, reg->off, access_size, 6973 zero_size_allowed, ACCESS_HELPER); 6974 case PTR_TO_MEM: 6975 if (type_is_rdonly_mem(reg->type)) { 6976 if (meta && meta->raw_mode) { 6977 verbose(env, "R%d cannot write into %s\n", regno, 6978 reg_type_str(env, reg->type)); 6979 return -EACCES; 6980 } 6981 } 6982 return check_mem_region_access(env, regno, reg->off, 6983 access_size, reg->mem_size, 6984 zero_size_allowed); 6985 case PTR_TO_BUF: 6986 if (type_is_rdonly_mem(reg->type)) { 6987 if (meta && meta->raw_mode) { 6988 verbose(env, "R%d cannot write into %s\n", regno, 6989 reg_type_str(env, reg->type)); 6990 return -EACCES; 6991 } 6992 6993 max_access = &env->prog->aux->max_rdonly_access; 6994 } else { 6995 max_access = &env->prog->aux->max_rdwr_access; 6996 } 6997 return check_buffer_access(env, reg, regno, reg->off, 6998 access_size, zero_size_allowed, 6999 max_access); 7000 case PTR_TO_STACK: 7001 return check_stack_range_initialized( 7002 env, 7003 regno, reg->off, access_size, 7004 zero_size_allowed, ACCESS_HELPER, meta); 7005 case PTR_TO_BTF_ID: 7006 return check_ptr_to_btf_access(env, regs, regno, reg->off, 7007 access_size, BPF_READ, -1); 7008 case PTR_TO_CTX: 7009 /* in case the function doesn't know how to access the context, 7010 * (because we are in a program of type SYSCALL for example), we 7011 * can not statically check its size. 7012 * Dynamically check it now. 7013 */ 7014 if (!env->ops->convert_ctx_access) { 7015 enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ; 7016 int offset = access_size - 1; 7017 7018 /* Allow zero-byte read from PTR_TO_CTX */ 7019 if (access_size == 0) 7020 return zero_size_allowed ? 0 : -EACCES; 7021 7022 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, 7023 atype, -1, false, false); 7024 } 7025 7026 fallthrough; 7027 default: /* scalar_value or invalid ptr */ 7028 /* Allow zero-byte read from NULL, regardless of pointer type */ 7029 if (zero_size_allowed && access_size == 0 && 7030 register_is_null(reg)) 7031 return 0; 7032 7033 verbose(env, "R%d type=%s ", regno, 7034 reg_type_str(env, reg->type)); 7035 verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); 7036 return -EACCES; 7037 } 7038 } 7039 7040 static int check_mem_size_reg(struct bpf_verifier_env *env, 7041 struct bpf_reg_state *reg, u32 regno, 7042 bool zero_size_allowed, 7043 struct bpf_call_arg_meta *meta) 7044 { 7045 int err; 7046 7047 /* This is used to refine r0 return value bounds for helpers 7048 * that enforce this value as an upper bound on return values. 7049 * See do_refine_retval_range() for helpers that can refine 7050 * the return value. C type of helper is u32 so we pull register 7051 * bound from umax_value however, if negative verifier errors 7052 * out. Only upper bounds can be learned because retval is an 7053 * int type and negative retvals are allowed. 7054 */ 7055 meta->msize_max_value = reg->umax_value; 7056 7057 /* The register is SCALAR_VALUE; the access check 7058 * happens using its boundaries. 7059 */ 7060 if (!tnum_is_const(reg->var_off)) 7061 /* For unprivileged variable accesses, disable raw 7062 * mode so that the program is required to 7063 * initialize all the memory that the helper could 7064 * just partially fill up. 
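 * E.g. for a helper taking ARG_PTR_TO_UNINIT_MEM plus ARG_CONST_SIZE, a
 * non-constant size means we cannot tell how many bytes the helper will
 * actually write, so the whole candidate range must already be initialized.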
7065 */ 7066 meta = NULL; 7067 7068 if (reg->smin_value < 0) { 7069 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 7070 regno); 7071 return -EACCES; 7072 } 7073 7074 if (reg->umin_value == 0) { 7075 err = check_helper_mem_access(env, regno - 1, 0, 7076 zero_size_allowed, 7077 meta); 7078 if (err) 7079 return err; 7080 } 7081 7082 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 7083 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 7084 regno); 7085 return -EACCES; 7086 } 7087 err = check_helper_mem_access(env, regno - 1, 7088 reg->umax_value, 7089 zero_size_allowed, meta); 7090 if (!err) 7091 err = mark_chain_precision(env, regno); 7092 return err; 7093 } 7094 7095 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 7096 u32 regno, u32 mem_size) 7097 { 7098 bool may_be_null = type_may_be_null(reg->type); 7099 struct bpf_reg_state saved_reg; 7100 struct bpf_call_arg_meta meta; 7101 int err; 7102 7103 if (register_is_null(reg)) 7104 return 0; 7105 7106 memset(&meta, 0, sizeof(meta)); 7107 /* Assuming that the register contains a value check if the memory 7108 * access is safe. Temporarily save and restore the register's state as 7109 * the conversion shouldn't be visible to a caller. 7110 */ 7111 if (may_be_null) { 7112 saved_reg = *reg; 7113 mark_ptr_not_null_reg(reg); 7114 } 7115 7116 err = check_helper_mem_access(env, regno, mem_size, true, &meta); 7117 /* Check access for BPF_WRITE */ 7118 meta.raw_mode = true; 7119 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta); 7120 7121 if (may_be_null) 7122 *reg = saved_reg; 7123 7124 return err; 7125 } 7126 7127 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 7128 u32 regno) 7129 { 7130 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; 7131 bool may_be_null = type_may_be_null(mem_reg->type); 7132 struct bpf_reg_state saved_reg; 7133 struct bpf_call_arg_meta meta; 7134 int err; 7135 7136 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); 7137 7138 memset(&meta, 0, sizeof(meta)); 7139 7140 if (may_be_null) { 7141 saved_reg = *mem_reg; 7142 mark_ptr_not_null_reg(mem_reg); 7143 } 7144 7145 err = check_mem_size_reg(env, reg, regno, true, &meta); 7146 /* Check access for BPF_WRITE */ 7147 meta.raw_mode = true; 7148 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta); 7149 7150 if (may_be_null) 7151 *mem_reg = saved_reg; 7152 return err; 7153 } 7154 7155 /* Implementation details: 7156 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL. 7157 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL. 7158 * Two bpf_map_lookups (even with the same key) will have different reg->id. 7159 * Two separate bpf_obj_new will also have different reg->id. 7160 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier 7161 * clears reg->id after value_or_null->value transition, since the verifier only 7162 * cares about the range of access to valid map value pointer and doesn't care 7163 * about actual address of the map element. 7164 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 7165 * reg->id > 0 after value_or_null->value transition. By doing so 7166 * two bpf_map_lookups will be considered two different pointers that 7167 * point to different bpf_spin_locks. Likewise for pointers to allocated objects 7168 * returned from bpf_obj_new. 7169 * The verifier allows taking only one bpf_spin_lock at a time to avoid 7170 * dead-locks. 
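 *
 * A typical (hypothetical) usage pattern being verified here looks like:
 *
 *   struct elem { struct bpf_spin_lock lock; int cnt; };
 *   ...
 *   val = bpf_map_lookup_elem(&map, &key);
 *   if (val) {
 *           bpf_spin_lock(&val->lock);
 *           val->cnt++;
 *           bpf_spin_unlock(&val->lock);
 *   }
 *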
7171 * Since only one bpf_spin_lock is allowed the checks are simpler than
7172 * reg_is_refcounted() logic. The verifier needs to remember only
7173 * one spin_lock instead of array of acquired_refs.
7174 * cur_state->active_lock remembers which map value element or allocated
7175 * object got locked and clears it after bpf_spin_unlock.
7176 */
7177 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
7178 bool is_lock)
7179 {
7180 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7181 struct bpf_verifier_state *cur = env->cur_state;
7182 bool is_const = tnum_is_const(reg->var_off);
7183 u64 val = reg->var_off.value;
7184 struct bpf_map *map = NULL;
7185 struct btf *btf = NULL;
7186 struct btf_record *rec;
7187
7188 if (!is_const) {
7189 verbose(env,
7190 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
7191 regno);
7192 return -EINVAL;
7193 }
7194 if (reg->type == PTR_TO_MAP_VALUE) {
7195 map = reg->map_ptr;
7196 if (!map->btf) {
7197 verbose(env,
7198 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
7199 map->name);
7200 return -EINVAL;
7201 }
7202 } else {
7203 btf = reg->btf;
7204 }
7205
7206 rec = reg_btf_record(reg);
7207 if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
7208 verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
7209 map ? map->name : "kptr");
7210 return -EINVAL;
7211 }
7212 if (rec->spin_lock_off != val + reg->off) {
7213 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
7214 val + reg->off, rec->spin_lock_off);
7215 return -EINVAL;
7216 }
7217 if (is_lock) {
7218 if (cur->active_lock.ptr) {
7219 verbose(env,
7220 "Locking two bpf_spin_locks is not allowed\n");
7221 return -EINVAL;
7222 }
7223 if (map)
7224 cur->active_lock.ptr = map;
7225 else
7226 cur->active_lock.ptr = btf;
7227 cur->active_lock.id = reg->id;
7228 } else {
7229 void *ptr;
7230
7231 if (map)
7232 ptr = map;
7233 else
7234 ptr = btf;
7235
7236 if (!cur->active_lock.ptr) {
7237 verbose(env, "bpf_spin_unlock without taking a lock\n");
7238 return -EINVAL;
7239 }
7240 if (cur->active_lock.ptr != ptr ||
7241 cur->active_lock.id != reg->id) {
7242 verbose(env, "bpf_spin_unlock of different lock\n");
7243 return -EINVAL;
7244 }
7245
7246 invalidate_non_owning_refs(env);
7247
7248 cur->active_lock.ptr = NULL;
7249 cur->active_lock.id = 0;
7250 }
7251 return 0;
7252 }
7253
7254 static int process_timer_func(struct bpf_verifier_env *env, int regno,
7255 struct bpf_call_arg_meta *meta)
7256 {
7257 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7258 bool is_const = tnum_is_const(reg->var_off);
7259 struct bpf_map *map = reg->map_ptr;
7260 u64 val = reg->var_off.value;
7261
7262 if (!is_const) {
7263 verbose(env,
7264 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
7265 regno);
7266 return -EINVAL;
7267 }
7268 if (!map->btf) {
7269 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
7270 map->name);
7271 return -EINVAL;
7272 }
7273 if (!btf_record_has_field(map->record, BPF_TIMER)) {
7274 verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
7275 return -EINVAL;
7276 }
7277 if (map->record->timer_off != val + reg->off) {
7278 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
7279 val + reg->off, map->record->timer_off);
7280 return -EINVAL;
7281 }
7282 if (meta->map_ptr) {
7283 verbose(env, "verifier bug.
Two map pointers in a timer helper\n");
7284 return -EFAULT;
7285 }
7286 meta->map_uid = reg->map_uid;
7287 meta->map_ptr = map;
7288 return 0;
7289 }
7290
7291 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
7292 struct bpf_call_arg_meta *meta)
7293 {
7294 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7295 struct bpf_map *map_ptr = reg->map_ptr;
7296 struct btf_field *kptr_field;
7297 u32 kptr_off;
7298
7299 if (!tnum_is_const(reg->var_off)) {
7300 verbose(env,
7301 "R%d doesn't have constant offset. kptr has to be at the constant offset\n",
7302 regno);
7303 return -EINVAL;
7304 }
7305 if (!map_ptr->btf) {
7306 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
7307 map_ptr->name);
7308 return -EINVAL;
7309 }
7310 if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
7311 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
7312 return -EINVAL;
7313 }
7314
7315 meta->map_ptr = map_ptr;
7316 kptr_off = reg->off + reg->var_off.value;
7317 kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
7318 if (!kptr_field) {
7319 verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
7320 return -EACCES;
7321 }
7322 if (kptr_field->type != BPF_KPTR_REF) {
7323 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
7324 return -EACCES;
7325 }
7326 meta->kptr_field = kptr_field;
7327 return 0;
7328 }
7329
7330 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
7331 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
7332 *
7333 * In both cases we deal with the first 8 bytes, but need to mark the next 8
7334 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
7335 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
7336 *
7337 * Mutability of bpf_dynptr is at two levels, one is at the level of struct
7338 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
7339 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
7340 * mutate the view of the dynptr and also possibly destroy it. In the latter
7341 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
7342 * memory that dynptr points to.
7343 *
7344 * The verifier will keep track of both levels of mutation (bpf_dynptr's in
7345 * reg->type and the memory's in reg->dynptr.type), but there is no support for
7346 * readonly dynptr view yet, hence only the first case is tracked and checked.
7347 *
7348 * This is consistent with how C applies the const modifier to a struct object,
7349 * where the pointer itself inside bpf_dynptr becomes const but not what it
7350 * points to.
7351 *
7352 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
7353 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
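 *
 * For instance (prototypes paraphrased from include/uapi/linux/bpf.h; treat
 * this as an illustrative sketch rather than an authoritative quote):
 *
 *   long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset,
 *                         void *src, u32 len, u64 flags);
 *
 * takes a const dynptr because it only writes into the memory the dynptr
 * refers to, while a constructor such as bpf_dynptr_from_mem() takes a
 * non-const 'struct bpf_dynptr *' (MEM_UNINIT) since it initializes the
 * dynptr object itself.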
7354 */
7355 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
7356 enum bpf_arg_type arg_type, int clone_ref_obj_id)
7357 {
7358 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7359 int err;
7360
7361 /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
7362 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
7363 */
7364 if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
7365 verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
7366 return -EFAULT;
7367 }
7368
7369 /* MEM_UNINIT - Points to memory that is an appropriate candidate for
7370 * constructing a mutable bpf_dynptr object.
7371 *
7372 * Currently, this is only possible with PTR_TO_STACK
7373 * pointing to a region of at least 16 bytes which doesn't
7374 * contain an existing bpf_dynptr.
7375 *
7376 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
7377 * mutated or destroyed. However, the memory it points to
7378 * may be mutated.
7379 *
7380 * None - Points to an initialized dynptr that can be mutated and
7381 * destroyed, including mutation of the memory it points
7382 * to.
7383 */
7384 if (arg_type & MEM_UNINIT) {
7385 int i;
7386
7387 if (!is_dynptr_reg_valid_uninit(env, reg)) {
7388 verbose(env, "Dynptr has to be an uninitialized dynptr\n");
7389 return -EINVAL;
7390 }
7391
7392 /* we write BPF_DW bits (8 bytes) at a time */
7393 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7394 err = check_mem_access(env, insn_idx, regno,
7395 i, BPF_DW, BPF_WRITE, -1, false, false);
7396 if (err)
7397 return err;
7398 }
7399
7400 err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id);
7401 } else /* MEM_RDONLY and None case from above */ {
7402 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
7403 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
7404 verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
7405 return -EINVAL;
7406 }
7407
7408 if (!is_dynptr_reg_valid_init(env, reg)) {
7409 verbose(env,
7410 "Expected an initialized dynptr as arg #%d\n",
7411 regno);
7412 return -EINVAL;
7413 }
7414
7415 /* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
7416 if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
7417 verbose(env,
7418 "Expected a dynptr of type %s as arg #%d\n",
7419 dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
7420 return -EINVAL;
7421 }
7422
7423 err = mark_dynptr_read(env, reg);
7424 }
7425 return err;
7426 }
7427
7428 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
7429 {
7430 struct bpf_func_state *state = func(env, reg);
7431
7432 return state->stack[spi].spilled_ptr.ref_obj_id;
7433 }
7434
7435 static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7436 {
7437 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
7438 }
7439
7440 static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7441 {
7442 return meta->kfunc_flags & KF_ITER_NEW;
7443 }
7444
7445 static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7446 {
7447 return meta->kfunc_flags & KF_ITER_NEXT;
7448 }
7449
7450 static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7451 {
7452 return meta->kfunc_flags & KF_ITER_DESTROY;
7453 }
7454
7455 static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
7456 {
7457 /* btf_check_iter_kfuncs() guarantees that
first argument of any iter
7458 * kfunc is iter state pointer
7459 */
7460 return arg == 0 && is_iter_kfunc(meta);
7461 }
7462
7463 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
7464 struct bpf_kfunc_call_arg_meta *meta)
7465 {
7466 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7467 const struct btf_type *t;
7468 const struct btf_param *arg;
7469 int spi, err, i, nr_slots;
7470 u32 btf_id;
7471
7472 /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
7473 arg = &btf_params(meta->func_proto)[0];
7474 t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */
7475 t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */
7476 nr_slots = t->size / BPF_REG_SIZE;
7477
7478 if (is_iter_new_kfunc(meta)) {
7479 /* bpf_iter_<type>_new() expects pointer to uninit iter state */
7480 if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
7481 verbose(env, "expected uninitialized iter_%s as arg #%d\n",
7482 iter_type_str(meta->btf, btf_id), regno);
7483 return -EINVAL;
7484 }
7485
7486 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
7487 err = check_mem_access(env, insn_idx, regno,
7488 i, BPF_DW, BPF_WRITE, -1, false, false);
7489 if (err)
7490 return err;
7491 }
7492
7493 err = mark_stack_slots_iter(env, reg, insn_idx, meta->btf, btf_id, nr_slots);
7494 if (err)
7495 return err;
7496 } else {
7497 /* iter_next() or iter_destroy() expect an initialized iter state */
7498 if (!is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots)) {
7499 verbose(env, "expected an initialized iter_%s as arg #%d\n",
7500 iter_type_str(meta->btf, btf_id), regno);
7501 return -EINVAL;
7502 }
7503
7504 spi = iter_get_spi(env, reg, nr_slots);
7505 if (spi < 0)
7506 return spi;
7507
7508 err = mark_iter_read(env, reg, spi, nr_slots);
7509 if (err)
7510 return err;
7511
7512 /* remember meta->iter info for process_iter_next_call() */
7513 meta->iter.spi = spi;
7514 meta->iter.frameno = reg->frameno;
7515 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);
7516
7517 if (is_iter_destroy_kfunc(meta)) {
7518 err = unmark_stack_slots_iter(env, reg, nr_slots);
7519 if (err)
7520 return err;
7521 }
7522 }
7523
7524 return 0;
7525 }
7526
7527 /* process_iter_next_call() is called when verifier gets to iterator's next
7528 * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
7529 * to it as just "iter_next()" in comments below.
7530 *
7531 * BPF verifier relies on a crucial contract for any iter_next()
7532 * implementation: it should *eventually* return NULL, and once that happens
7533 * it should keep returning NULL. That is, once iterator exhausts elements to
7534 * iterate, it should never reset or spuriously return new elements.
7535 *
7536 * With the assumption of such contract, process_iter_next_call() simulates
7537 * a fork in the verifier state to validate loop logic correctness and safety
7538 * without having to simulate an infinite number of iterations.
7539 *
7540 * In current state, we first assume that iter_next() returned NULL and
7541 * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
7542 * conditions we should not form an infinite loop and should eventually reach
7543 * exit.
7544 *
7545 * Besides that, we also fork current state and enqueue it for later
7546 * verification. In a forked state we keep iterator state as ACTIVE
7547 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next().
We 7548 * also bump iteration depth to prevent erroneous infinite loop detection 7549 * later on (see iter_active_depths_differ() comment for details). In this 7550 * state we assume that we'll eventually loop back to another iter_next() 7551 * calls (it could be in exactly same location or in some other instruction, 7552 * it doesn't matter, we don't make any unnecessary assumptions about this, 7553 * everything revolves around iterator state in a stack slot, not which 7554 * instruction is calling iter_next()). When that happens, we either will come 7555 * to iter_next() with equivalent state and can conclude that next iteration 7556 * will proceed in exactly the same way as we just verified, so it's safe to 7557 * assume that loop converges. If not, we'll go on another iteration 7558 * simulation with a different input state, until all possible starting states 7559 * are validated or we reach maximum number of instructions limit. 7560 * 7561 * This way, we will either exhaustively discover all possible input states 7562 * that iterator loop can start with and eventually will converge, or we'll 7563 * effectively regress into bounded loop simulation logic and either reach 7564 * maximum number of instructions if loop is not provably convergent, or there 7565 * is some statically known limit on number of iterations (e.g., if there is 7566 * an explicit `if n > 100 then break;` statement somewhere in the loop). 7567 * 7568 * One very subtle but very important aspect is that we *always* simulate NULL 7569 * condition first (as the current state) before we simulate non-NULL case. 7570 * This has to do with intricacies of scalar precision tracking. By simulating 7571 * "exit condition" of iter_next() returning NULL first, we make sure all the 7572 * relevant precision marks *that will be set **after** we exit iterator loop* 7573 * are propagated backwards to common parent state of NULL and non-NULL 7574 * branches. Thanks to that, state equivalence checks done later in forked 7575 * state, when reaching iter_next() for ACTIVE iterator, can assume that 7576 * precision marks are finalized and won't change. Because simulating another 7577 * ACTIVE iterator iteration won't change them (because given same input 7578 * states we'll end up with exactly same output states which we are currently 7579 * comparing; and verification after the loop already propagated back what 7580 * needs to be **additionally** tracked as precise). It's subtle, grok 7581 * precision tracking for more intuitive understanding. 
7582 */ 7583 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, 7584 struct bpf_kfunc_call_arg_meta *meta) 7585 { 7586 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st; 7587 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; 7588 struct bpf_reg_state *cur_iter, *queued_iter; 7589 int iter_frameno = meta->iter.frameno; 7590 int iter_spi = meta->iter.spi; 7591 7592 BTF_TYPE_EMIT(struct bpf_iter); 7593 7594 cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr; 7595 7596 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && 7597 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { 7598 verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n", 7599 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); 7600 return -EFAULT; 7601 } 7602 7603 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { 7604 /* branch out active iter state */ 7605 queued_st = push_stack(env, insn_idx + 1, insn_idx, false); 7606 if (!queued_st) 7607 return -ENOMEM; 7608 7609 queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; 7610 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; 7611 queued_iter->iter.depth++; 7612 7613 queued_fr = queued_st->frame[queued_st->curframe]; 7614 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); 7615 } 7616 7617 /* switch to DRAINED state, but keep the depth unchanged */ 7618 /* mark current iter state as drained and assume returned NULL */ 7619 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; 7620 __mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]); 7621 7622 return 0; 7623 } 7624 7625 static bool arg_type_is_mem_size(enum bpf_arg_type type) 7626 { 7627 return type == ARG_CONST_SIZE || 7628 type == ARG_CONST_SIZE_OR_ZERO; 7629 } 7630 7631 static bool arg_type_is_release(enum bpf_arg_type type) 7632 { 7633 return type & OBJ_RELEASE; 7634 } 7635 7636 static bool arg_type_is_dynptr(enum bpf_arg_type type) 7637 { 7638 return base_type(type) == ARG_PTR_TO_DYNPTR; 7639 } 7640 7641 static int int_ptr_type_to_size(enum bpf_arg_type type) 7642 { 7643 if (type == ARG_PTR_TO_INT) 7644 return sizeof(u32); 7645 else if (type == ARG_PTR_TO_LONG) 7646 return sizeof(u64); 7647 7648 return -EINVAL; 7649 } 7650 7651 static int resolve_map_arg_type(struct bpf_verifier_env *env, 7652 const struct bpf_call_arg_meta *meta, 7653 enum bpf_arg_type *arg_type) 7654 { 7655 if (!meta->map_ptr) { 7656 /* kernel subsystem misconfigured verifier */ 7657 verbose(env, "invalid map_ptr to access map->type\n"); 7658 return -EACCES; 7659 } 7660 7661 switch (meta->map_ptr->map_type) { 7662 case BPF_MAP_TYPE_SOCKMAP: 7663 case BPF_MAP_TYPE_SOCKHASH: 7664 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 7665 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 7666 } else { 7667 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 7668 return -EINVAL; 7669 } 7670 break; 7671 case BPF_MAP_TYPE_BLOOM_FILTER: 7672 if (meta->func_id == BPF_FUNC_map_peek_elem) 7673 *arg_type = ARG_PTR_TO_MAP_VALUE; 7674 break; 7675 default: 7676 break; 7677 } 7678 return 0; 7679 } 7680 7681 struct bpf_reg_types { 7682 const enum bpf_reg_type types[10]; 7683 u32 *btf_id; 7684 }; 7685 7686 static const struct bpf_reg_types sock_types = { 7687 .types = { 7688 PTR_TO_SOCK_COMMON, 7689 PTR_TO_SOCKET, 7690 PTR_TO_TCP_SOCK, 7691 PTR_TO_XDP_SOCK, 7692 }, 7693 }; 7694 7695 #ifdef CONFIG_NET 7696 static const struct bpf_reg_types btf_id_sock_common_types = { 7697 .types = { 7698 PTR_TO_SOCK_COMMON, 7699 PTR_TO_SOCKET, 7700 PTR_TO_TCP_SOCK, 7701 
PTR_TO_XDP_SOCK, 7702 PTR_TO_BTF_ID, 7703 PTR_TO_BTF_ID | PTR_TRUSTED, 7704 }, 7705 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 7706 }; 7707 #endif 7708 7709 static const struct bpf_reg_types mem_types = { 7710 .types = { 7711 PTR_TO_STACK, 7712 PTR_TO_PACKET, 7713 PTR_TO_PACKET_META, 7714 PTR_TO_MAP_KEY, 7715 PTR_TO_MAP_VALUE, 7716 PTR_TO_MEM, 7717 PTR_TO_MEM | MEM_RINGBUF, 7718 PTR_TO_BUF, 7719 PTR_TO_BTF_ID | PTR_TRUSTED, 7720 }, 7721 }; 7722 7723 static const struct bpf_reg_types int_ptr_types = { 7724 .types = { 7725 PTR_TO_STACK, 7726 PTR_TO_PACKET, 7727 PTR_TO_PACKET_META, 7728 PTR_TO_MAP_KEY, 7729 PTR_TO_MAP_VALUE, 7730 }, 7731 }; 7732 7733 static const struct bpf_reg_types spin_lock_types = { 7734 .types = { 7735 PTR_TO_MAP_VALUE, 7736 PTR_TO_BTF_ID | MEM_ALLOC, 7737 } 7738 }; 7739 7740 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; 7741 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; 7742 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; 7743 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } }; 7744 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; 7745 static const struct bpf_reg_types btf_ptr_types = { 7746 .types = { 7747 PTR_TO_BTF_ID, 7748 PTR_TO_BTF_ID | PTR_TRUSTED, 7749 PTR_TO_BTF_ID | MEM_RCU, 7750 }, 7751 }; 7752 static const struct bpf_reg_types percpu_btf_ptr_types = { 7753 .types = { 7754 PTR_TO_BTF_ID | MEM_PERCPU, 7755 PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED, 7756 } 7757 }; 7758 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; 7759 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; 7760 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; 7761 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; 7762 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } }; 7763 static const struct bpf_reg_types dynptr_types = { 7764 .types = { 7765 PTR_TO_STACK, 7766 CONST_PTR_TO_DYNPTR, 7767 } 7768 }; 7769 7770 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { 7771 [ARG_PTR_TO_MAP_KEY] = &mem_types, 7772 [ARG_PTR_TO_MAP_VALUE] = &mem_types, 7773 [ARG_CONST_SIZE] = &scalar_types, 7774 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, 7775 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, 7776 [ARG_CONST_MAP_PTR] = &const_map_ptr_types, 7777 [ARG_PTR_TO_CTX] = &context_types, 7778 [ARG_PTR_TO_SOCK_COMMON] = &sock_types, 7779 #ifdef CONFIG_NET 7780 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, 7781 #endif 7782 [ARG_PTR_TO_SOCKET] = &fullsock_types, 7783 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, 7784 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, 7785 [ARG_PTR_TO_MEM] = &mem_types, 7786 [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, 7787 [ARG_PTR_TO_INT] = &int_ptr_types, 7788 [ARG_PTR_TO_LONG] = &int_ptr_types, 7789 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, 7790 [ARG_PTR_TO_FUNC] = &func_ptr_types, 7791 [ARG_PTR_TO_STACK] = &stack_ptr_types, 7792 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, 7793 [ARG_PTR_TO_TIMER] = &timer_types, 7794 [ARG_PTR_TO_KPTR] = &kptr_types, 7795 [ARG_PTR_TO_DYNPTR] = &dynptr_types, 7796 }; 7797 7798 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, 7799 enum bpf_arg_type arg_type, 7800 const u32 *arg_btf_id, 7801 struct bpf_call_arg_meta *meta) 7802 { 7803 
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7804 enum bpf_reg_type expected, type = reg->type;
7805 const struct bpf_reg_types *compatible;
7806 int i, j;
7807
7808 compatible = compatible_reg_types[base_type(arg_type)];
7809 if (!compatible) {
7810 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
7811 return -EFAULT;
7812 }
7813
7814 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
7815 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
7816 *
7817 * Same for MAYBE_NULL:
7818 *
7819 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
7820 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
7821 *
7822 * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type.
7823 *
7824 * Therefore we fold these flags depending on the arg_type before comparison.
7825 */
7826 if (arg_type & MEM_RDONLY)
7827 type &= ~MEM_RDONLY;
7828 if (arg_type & PTR_MAYBE_NULL)
7829 type &= ~PTR_MAYBE_NULL;
7830 if (base_type(arg_type) == ARG_PTR_TO_MEM)
7831 type &= ~DYNPTR_TYPE_FLAG_MASK;
7832
7833 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type))
7834 type &= ~MEM_ALLOC;
7835
7836 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
7837 expected = compatible->types[i];
7838 if (expected == NOT_INIT)
7839 break;
7840
7841 if (type == expected)
7842 goto found;
7843 }
7844
7845 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
7846 for (j = 0; j + 1 < i; j++)
7847 verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
7848 verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
7849 return -EACCES;
7850
7851 found:
7852 if (base_type(reg->type) != PTR_TO_BTF_ID)
7853 return 0;
7854
7855 if (compatible == &mem_types) {
7856 if (!(arg_type & MEM_RDONLY)) {
7857 verbose(env,
7858 "%s() may write into memory pointed by R%d type=%s\n",
7859 func_id_name(meta->func_id),
7860 regno, reg_type_str(env, reg->type));
7861 return -EACCES;
7862 }
7863 return 0;
7864 }
7865
7866 switch ((int)reg->type) {
7867 case PTR_TO_BTF_ID:
7868 case PTR_TO_BTF_ID | PTR_TRUSTED:
7869 case PTR_TO_BTF_ID | MEM_RCU:
7870 case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
7871 case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
7872 {
7873 /* For bpf_sk_release, it needs to match against first member
7874 * 'struct sock_common', hence make an exception for it. This
7875 * allows bpf_sk_release to work for multiple socket types.
7876 */
7877 bool strict_type_match = arg_type_is_release(arg_type) &&
7878 meta->func_id != BPF_FUNC_sk_release;
7879
7880 if (type_may_be_null(reg->type) &&
7881 (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) {
7882 verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
7883 return -EACCES;
7884 }
7885
7886 if (!arg_btf_id) {
7887 if (!compatible->btf_id) {
7888 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
7889 return -EFAULT;
7890 }
7891 arg_btf_id = compatible->btf_id;
7892 }
7893
7894 if (meta->func_id == BPF_FUNC_kptr_xchg) {
7895 if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
7896 return -EACCES;
7897 } else {
7898 if (arg_btf_id == BPF_PTR_POISON) {
7899 verbose(env, "verifier internal error:");
7900 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
7901 regno);
7902 return -EACCES;
7903 }
7904
7905 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
7906 btf_vmlinux, *arg_btf_id,
7907 strict_type_match)) {
7908 verbose(env, "R%d is of type %s but %s is expected\n",
7909 regno, btf_type_name(reg->btf, reg->btf_id),
7910 btf_type_name(btf_vmlinux, *arg_btf_id));
7911 return -EACCES;
7912 }
7913 }
7914 break;
7915 }
7916 case PTR_TO_BTF_ID | MEM_ALLOC:
7917 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
7918 meta->func_id != BPF_FUNC_kptr_xchg) {
7919 verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
7920 return -EFAULT;
7921 }
7922 if (meta->func_id == BPF_FUNC_kptr_xchg) {
7923 if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
7924 return -EACCES;
7925 }
7926 break;
7927 case PTR_TO_BTF_ID | MEM_PERCPU:
7928 case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
7929 /* Handled by helper specific checks */
7930 break;
7931 default:
7932 verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n");
7933 return -EFAULT;
7934 }
7935 return 0;
7936 }
7937
7938 static struct btf_field *
7939 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
7940 {
7941 struct btf_field *field;
7942 struct btf_record *rec;
7943
7944 rec = reg_btf_record(reg);
7945 if (!rec)
7946 return NULL;
7947
7948 field = btf_record_find(rec, off, fields);
7949 if (!field)
7950 return NULL;
7951
7952 return field;
7953 }
7954
7955 int check_func_arg_reg_off(struct bpf_verifier_env *env,
7956 const struct bpf_reg_state *reg, int regno,
7957 enum bpf_arg_type arg_type)
7958 {
7959 u32 type = reg->type;
7960
7961 /* When referenced register is passed to release function, its fixed
7962 * offset must be 0.
7963 *
7964 * We will check that a reg with arg_type_is_release() has a ref_obj_id
7965 * when storing meta->release_regno.
7966 */
7967 if (arg_type_is_release(arg_type)) {
7968 /* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
7969 * may not directly point to the object being released, but to
7970 * dynptr pointing to such object, which might be at some offset
7971 * on the stack. In that case, we simply fall back to the
7972 * default handling.
7973 */
7974 if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
7975 return 0;
7976
7977 /* Doing check_ptr_off_reg check for the offset will catch this
7978 * because fixed_off_ok is false, but checking here allows us
7979 * to give the user a better error message.
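 *
 * E.g. (hypothetical program sketch): acquiring a socket via
 * bpf_sk_lookup_tcp() and then passing 'sk + 4' to bpf_sk_release()
 * would be rejected right below with the "must have zero offset"
 * message rather than the more generic error from __check_ptr_off_reg().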
7980 */
7981 if (reg->off) {
7982 verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
7983 regno);
7984 return -EINVAL;
7985 }
7986 return __check_ptr_off_reg(env, reg, regno, false);
7987 }
7988
7989 switch (type) {
7990 /* Pointer types where both fixed and variable offset is explicitly allowed: */
7991 case PTR_TO_STACK:
7992 case PTR_TO_PACKET:
7993 case PTR_TO_PACKET_META:
7994 case PTR_TO_MAP_KEY:
7995 case PTR_TO_MAP_VALUE:
7996 case PTR_TO_MEM:
7997 case PTR_TO_MEM | MEM_RDONLY:
7998 case PTR_TO_MEM | MEM_RINGBUF:
7999 case PTR_TO_BUF:
8000 case PTR_TO_BUF | MEM_RDONLY:
8001 case SCALAR_VALUE:
8002 return 0;
8003 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows
8004 * fixed offset.
8005 */
8006 case PTR_TO_BTF_ID:
8007 case PTR_TO_BTF_ID | MEM_ALLOC:
8008 case PTR_TO_BTF_ID | PTR_TRUSTED:
8009 case PTR_TO_BTF_ID | MEM_RCU:
8010 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
8011 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU:
8012 /* When referenced PTR_TO_BTF_ID is passed to release function,
8013 * its fixed offset must be 0. In the other cases, fixed offset
8014 * can be non-zero. This was already checked above. So pass
8015 * fixed_off_ok as true to allow fixed offset for all other
8016 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
8017 * still need to do checks instead of returning.
8018 */
8019 return __check_ptr_off_reg(env, reg, regno, true);
8020 default:
8021 return __check_ptr_off_reg(env, reg, regno, false);
8022 }
8023 }
8024
8025 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
8026 const struct bpf_func_proto *fn,
8027 struct bpf_reg_state *regs)
8028 {
8029 struct bpf_reg_state *state = NULL;
8030 int i;
8031
8032 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
8033 if (arg_type_is_dynptr(fn->arg_type[i])) {
8034 if (state) {
8035 verbose(env, "verifier internal error: multiple dynptr args\n");
8036 return NULL;
8037 }
8038 state = &regs[BPF_REG_1 + i];
8039 }
8040
8041 if (!state)
8042 verbose(env, "verifier internal error: no dynptr arg found\n");
8043
8044 return state;
8045 }
8046
8047 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
8048 {
8049 struct bpf_func_state *state = func(env, reg);
8050 int spi;
8051
8052 if (reg->type == CONST_PTR_TO_DYNPTR)
8053 return reg->id;
8054 spi = dynptr_get_spi(env, reg);
8055 if (spi < 0)
8056 return spi;
8057 return state->stack[spi].spilled_ptr.id;
8058 }
8059
8060 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
8061 {
8062 struct bpf_func_state *state = func(env, reg);
8063 int spi;
8064
8065 if (reg->type == CONST_PTR_TO_DYNPTR)
8066 return reg->ref_obj_id;
8067 spi = dynptr_get_spi(env, reg);
8068 if (spi < 0)
8069 return spi;
8070 return state->stack[spi].spilled_ptr.ref_obj_id;
8071 }
8072
8073 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
8074 struct bpf_reg_state *reg)
8075 {
8076 struct bpf_func_state *state = func(env, reg);
8077 int spi;
8078
8079 if (reg->type == CONST_PTR_TO_DYNPTR)
8080 return reg->dynptr.type;
8081
8082 spi = __get_spi(reg->off);
8083 if (spi < 0) {
8084 verbose(env, "verifier internal error: invalid spi when querying dynptr type\n");
8085 return BPF_DYNPTR_TYPE_INVALID;
8086 }
8087
8088 return state->stack[spi].spilled_ptr.dynptr.type;
8089 }
8090
8091 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
8092 struct bpf_call_arg_meta *meta,
8093 const struct bpf_func_proto *fn,
8094 int insn_idx)
8095 {
8096 u32 regno = BPF_REG_1 + arg;
8097 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
8098 enum bpf_arg_type arg_type = fn->arg_type[arg];
8099 enum bpf_reg_type type = reg->type;
8100 u32 *arg_btf_id = NULL;
8101 int err = 0;
8102
8103 if (arg_type == ARG_DONTCARE)
8104 return 0;
8105
8106 err = check_reg_arg(env, regno, SRC_OP);
8107 if (err)
8108 return err;
8109
8110 if (arg_type == ARG_ANYTHING) {
8111 if (is_pointer_value(env, regno)) {
8112 verbose(env, "R%d leaks addr into helper function\n",
8113 regno);
8114 return -EACCES;
8115 }
8116 return 0;
8117 }
8118
8119 if (type_is_pkt_pointer(type) &&
8120 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
8121 verbose(env, "helper access to the packet is not allowed\n");
8122 return -EACCES;
8123 }
8124
8125 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
8126 err = resolve_map_arg_type(env, meta, &arg_type);
8127 if (err)
8128 return err;
8129 }
8130
8131 if (register_is_null(reg) && type_may_be_null(arg_type))
8132 /* A NULL register has a SCALAR_VALUE type, so skip
8133 * type checking.
8134 */
8135 goto skip_type_check;
8136
8137 /* arg_btf_id and arg_size are in a union. */
8138 if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
8139 base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
8140 arg_btf_id = fn->arg_btf_id[arg];
8141
8142 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
8143 if (err)
8144 return err;
8145
8146 err = check_func_arg_reg_off(env, reg, regno, arg_type);
8147 if (err)
8148 return err;
8149
8150 skip_type_check:
8151 if (arg_type_is_release(arg_type)) {
8152 if (arg_type_is_dynptr(arg_type)) {
8153 struct bpf_func_state *state = func(env, reg);
8154 int spi;
8155
8156 /* Only dynptr created on stack can be released, thus
8157 * the get_spi and stack state checks for spilled_ptr
8158 * should only be done before process_dynptr_func for
8159 * PTR_TO_STACK.
8160 */
8161 if (reg->type == PTR_TO_STACK) {
8162 spi = dynptr_get_spi(env, reg);
8163 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
8164 verbose(env, "arg %d is an unacquired reference\n", regno);
8165 return -EINVAL;
8166 }
8167 } else {
8168 verbose(env, "cannot release unowned const bpf_dynptr\n");
8169 return -EINVAL;
8170 }
8171 } else if (!reg->ref_obj_id && !register_is_null(reg)) {
8172 verbose(env, "R%d must be referenced when passed to release function\n",
8173 regno);
8174 return -EINVAL;
8175 }
8176 if (meta->release_regno) {
8177 verbose(env, "verifier internal error: more than one release argument\n");
8178 return -EFAULT;
8179 }
8180 meta->release_regno = regno;
8181 }
8182
8183 if (reg->ref_obj_id) {
8184 if (meta->ref_obj_id) {
8185 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
8186 regno, reg->ref_obj_id,
8187 meta->ref_obj_id);
8188 return -EFAULT;
8189 }
8190 meta->ref_obj_id = reg->ref_obj_id;
8191 }
8192
8193 switch (base_type(arg_type)) {
8194 case ARG_CONST_MAP_PTR:
8195 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
8196 if (meta->map_ptr) {
8197 /* Use map_uid (which is unique id of inner map) to reject:
8198 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
8199 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
8200 * if (inner_map1 && inner_map2) {
8201 * timer = bpf_map_lookup_elem(inner_map1);
8202 * if (timer)
8203 * // mismatch would have been allowed
8204 * bpf_timer_init(timer, inner_map2);
8205 * }
8206 *
8207 * Comparing map_ptr is enough to distinguish normal and outer maps.
8208 */ 8209 if (meta->map_ptr != reg->map_ptr || 8210 meta->map_uid != reg->map_uid) { 8211 verbose(env, 8212 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", 8213 meta->map_uid, reg->map_uid); 8214 return -EINVAL; 8215 } 8216 } 8217 meta->map_ptr = reg->map_ptr; 8218 meta->map_uid = reg->map_uid; 8219 break; 8220 case ARG_PTR_TO_MAP_KEY: 8221 /* bpf_map_xxx(..., map_ptr, ..., key) call: 8222 * check that [key, key + map->key_size) are within 8223 * stack limits and initialized 8224 */ 8225 if (!meta->map_ptr) { 8226 /* in function declaration map_ptr must come before 8227 * map_key, so that it's verified and known before 8228 * we have to check map_key here. Otherwise it means 8229 * that kernel subsystem misconfigured verifier 8230 */ 8231 verbose(env, "invalid map_ptr to access map->key\n"); 8232 return -EACCES; 8233 } 8234 err = check_helper_mem_access(env, regno, 8235 meta->map_ptr->key_size, false, 8236 NULL); 8237 break; 8238 case ARG_PTR_TO_MAP_VALUE: 8239 if (type_may_be_null(arg_type) && register_is_null(reg)) 8240 return 0; 8241 8242 /* bpf_map_xxx(..., map_ptr, ..., value) call: 8243 * check [value, value + map->value_size) validity 8244 */ 8245 if (!meta->map_ptr) { 8246 /* kernel subsystem misconfigured verifier */ 8247 verbose(env, "invalid map_ptr to access map->value\n"); 8248 return -EACCES; 8249 } 8250 meta->raw_mode = arg_type & MEM_UNINIT; 8251 err = check_helper_mem_access(env, regno, 8252 meta->map_ptr->value_size, false, 8253 meta); 8254 break; 8255 case ARG_PTR_TO_PERCPU_BTF_ID: 8256 if (!reg->btf_id) { 8257 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 8258 return -EACCES; 8259 } 8260 meta->ret_btf = reg->btf; 8261 meta->ret_btf_id = reg->btf_id; 8262 break; 8263 case ARG_PTR_TO_SPIN_LOCK: 8264 if (in_rbtree_lock_required_cb(env)) { 8265 verbose(env, "can't spin_{lock,unlock} in rbtree cb\n"); 8266 return -EACCES; 8267 } 8268 if (meta->func_id == BPF_FUNC_spin_lock) { 8269 err = process_spin_lock(env, regno, true); 8270 if (err) 8271 return err; 8272 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 8273 err = process_spin_lock(env, regno, false); 8274 if (err) 8275 return err; 8276 } else { 8277 verbose(env, "verifier internal error\n"); 8278 return -EFAULT; 8279 } 8280 break; 8281 case ARG_PTR_TO_TIMER: 8282 err = process_timer_func(env, regno, meta); 8283 if (err) 8284 return err; 8285 break; 8286 case ARG_PTR_TO_FUNC: 8287 meta->subprogno = reg->subprogno; 8288 break; 8289 case ARG_PTR_TO_MEM: 8290 /* The access to this pointer is only checked when we hit the 8291 * next is_mem_size argument below. 
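 *
 * For example, in a helper signature along the lines of (paraphrased,
 * not quoted verbatim):
 *
 *   bpf_probe_read_kernel(void *dst, u32 size, const void *src)
 *
 * 'dst' is an ARG_PTR_TO_MEM | MEM_UNINIT argument whose accessibility is
 * only validated once the paired size argument is processed by
 * check_mem_size_reg().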
8292 */ 8293 meta->raw_mode = arg_type & MEM_UNINIT; 8294 if (arg_type & MEM_FIXED_SIZE) { 8295 err = check_helper_mem_access(env, regno, 8296 fn->arg_size[arg], false, 8297 meta); 8298 } 8299 break; 8300 case ARG_CONST_SIZE: 8301 err = check_mem_size_reg(env, reg, regno, false, meta); 8302 break; 8303 case ARG_CONST_SIZE_OR_ZERO: 8304 err = check_mem_size_reg(env, reg, regno, true, meta); 8305 break; 8306 case ARG_PTR_TO_DYNPTR: 8307 err = process_dynptr_func(env, regno, insn_idx, arg_type, 0); 8308 if (err) 8309 return err; 8310 break; 8311 case ARG_CONST_ALLOC_SIZE_OR_ZERO: 8312 if (!tnum_is_const(reg->var_off)) { 8313 verbose(env, "R%d is not a known constant'\n", 8314 regno); 8315 return -EACCES; 8316 } 8317 meta->mem_size = reg->var_off.value; 8318 err = mark_chain_precision(env, regno); 8319 if (err) 8320 return err; 8321 break; 8322 case ARG_PTR_TO_INT: 8323 case ARG_PTR_TO_LONG: 8324 { 8325 int size = int_ptr_type_to_size(arg_type); 8326 8327 err = check_helper_mem_access(env, regno, size, false, meta); 8328 if (err) 8329 return err; 8330 err = check_ptr_alignment(env, reg, 0, size, true); 8331 break; 8332 } 8333 case ARG_PTR_TO_CONST_STR: 8334 { 8335 struct bpf_map *map = reg->map_ptr; 8336 int map_off; 8337 u64 map_addr; 8338 char *str_ptr; 8339 8340 if (!bpf_map_is_rdonly(map)) { 8341 verbose(env, "R%d does not point to a readonly map'\n", regno); 8342 return -EACCES; 8343 } 8344 8345 if (!tnum_is_const(reg->var_off)) { 8346 verbose(env, "R%d is not a constant address'\n", regno); 8347 return -EACCES; 8348 } 8349 8350 if (!map->ops->map_direct_value_addr) { 8351 verbose(env, "no direct value access support for this map type\n"); 8352 return -EACCES; 8353 } 8354 8355 err = check_map_access(env, regno, reg->off, 8356 map->value_size - reg->off, false, 8357 ACCESS_HELPER); 8358 if (err) 8359 return err; 8360 8361 map_off = reg->off + reg->var_off.value; 8362 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); 8363 if (err) { 8364 verbose(env, "direct value access on string failed\n"); 8365 return err; 8366 } 8367 8368 str_ptr = (char *)(long)(map_addr); 8369 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { 8370 verbose(env, "string is not zero-terminated\n"); 8371 return -EINVAL; 8372 } 8373 break; 8374 } 8375 case ARG_PTR_TO_KPTR: 8376 err = process_kptr_func(env, regno, meta); 8377 if (err) 8378 return err; 8379 break; 8380 } 8381 8382 return err; 8383 } 8384 8385 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 8386 { 8387 enum bpf_attach_type eatype = env->prog->expected_attach_type; 8388 enum bpf_prog_type type = resolve_prog_type(env->prog); 8389 8390 if (func_id != BPF_FUNC_map_update_elem) 8391 return false; 8392 8393 /* It's not possible to get access to a locked struct sock in these 8394 * contexts, so updating is safe. 
8395 */ 8396 switch (type) { 8397 case BPF_PROG_TYPE_TRACING: 8398 if (eatype == BPF_TRACE_ITER) 8399 return true; 8400 break; 8401 case BPF_PROG_TYPE_SOCKET_FILTER: 8402 case BPF_PROG_TYPE_SCHED_CLS: 8403 case BPF_PROG_TYPE_SCHED_ACT: 8404 case BPF_PROG_TYPE_XDP: 8405 case BPF_PROG_TYPE_SK_REUSEPORT: 8406 case BPF_PROG_TYPE_FLOW_DISSECTOR: 8407 case BPF_PROG_TYPE_SK_LOOKUP: 8408 return true; 8409 default: 8410 break; 8411 } 8412 8413 verbose(env, "cannot update sockmap in this context\n"); 8414 return false; 8415 } 8416 8417 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 8418 { 8419 return env->prog->jit_requested && 8420 bpf_jit_supports_subprog_tailcalls(); 8421 } 8422 8423 static int check_map_func_compatibility(struct bpf_verifier_env *env, 8424 struct bpf_map *map, int func_id) 8425 { 8426 if (!map) 8427 return 0; 8428 8429 /* We need a two way check, first is from map perspective ... */ 8430 switch (map->map_type) { 8431 case BPF_MAP_TYPE_PROG_ARRAY: 8432 if (func_id != BPF_FUNC_tail_call) 8433 goto error; 8434 break; 8435 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 8436 if (func_id != BPF_FUNC_perf_event_read && 8437 func_id != BPF_FUNC_perf_event_output && 8438 func_id != BPF_FUNC_skb_output && 8439 func_id != BPF_FUNC_perf_event_read_value && 8440 func_id != BPF_FUNC_xdp_output) 8441 goto error; 8442 break; 8443 case BPF_MAP_TYPE_RINGBUF: 8444 if (func_id != BPF_FUNC_ringbuf_output && 8445 func_id != BPF_FUNC_ringbuf_reserve && 8446 func_id != BPF_FUNC_ringbuf_query && 8447 func_id != BPF_FUNC_ringbuf_reserve_dynptr && 8448 func_id != BPF_FUNC_ringbuf_submit_dynptr && 8449 func_id != BPF_FUNC_ringbuf_discard_dynptr) 8450 goto error; 8451 break; 8452 case BPF_MAP_TYPE_USER_RINGBUF: 8453 if (func_id != BPF_FUNC_user_ringbuf_drain) 8454 goto error; 8455 break; 8456 case BPF_MAP_TYPE_STACK_TRACE: 8457 if (func_id != BPF_FUNC_get_stackid) 8458 goto error; 8459 break; 8460 case BPF_MAP_TYPE_CGROUP_ARRAY: 8461 if (func_id != BPF_FUNC_skb_under_cgroup && 8462 func_id != BPF_FUNC_current_task_under_cgroup) 8463 goto error; 8464 break; 8465 case BPF_MAP_TYPE_CGROUP_STORAGE: 8466 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 8467 if (func_id != BPF_FUNC_get_local_storage) 8468 goto error; 8469 break; 8470 case BPF_MAP_TYPE_DEVMAP: 8471 case BPF_MAP_TYPE_DEVMAP_HASH: 8472 if (func_id != BPF_FUNC_redirect_map && 8473 func_id != BPF_FUNC_map_lookup_elem) 8474 goto error; 8475 break; 8476 /* Restrict bpf side of cpumap and xskmap, open when use-cases 8477 * appear. 
8478 */ 8479 case BPF_MAP_TYPE_CPUMAP: 8480 if (func_id != BPF_FUNC_redirect_map) 8481 goto error; 8482 break; 8483 case BPF_MAP_TYPE_XSKMAP: 8484 if (func_id != BPF_FUNC_redirect_map && 8485 func_id != BPF_FUNC_map_lookup_elem) 8486 goto error; 8487 break; 8488 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 8489 case BPF_MAP_TYPE_HASH_OF_MAPS: 8490 if (func_id != BPF_FUNC_map_lookup_elem) 8491 goto error; 8492 break; 8493 case BPF_MAP_TYPE_SOCKMAP: 8494 if (func_id != BPF_FUNC_sk_redirect_map && 8495 func_id != BPF_FUNC_sock_map_update && 8496 func_id != BPF_FUNC_map_delete_elem && 8497 func_id != BPF_FUNC_msg_redirect_map && 8498 func_id != BPF_FUNC_sk_select_reuseport && 8499 func_id != BPF_FUNC_map_lookup_elem && 8500 !may_update_sockmap(env, func_id)) 8501 goto error; 8502 break; 8503 case BPF_MAP_TYPE_SOCKHASH: 8504 if (func_id != BPF_FUNC_sk_redirect_hash && 8505 func_id != BPF_FUNC_sock_hash_update && 8506 func_id != BPF_FUNC_map_delete_elem && 8507 func_id != BPF_FUNC_msg_redirect_hash && 8508 func_id != BPF_FUNC_sk_select_reuseport && 8509 func_id != BPF_FUNC_map_lookup_elem && 8510 !may_update_sockmap(env, func_id)) 8511 goto error; 8512 break; 8513 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 8514 if (func_id != BPF_FUNC_sk_select_reuseport) 8515 goto error; 8516 break; 8517 case BPF_MAP_TYPE_QUEUE: 8518 case BPF_MAP_TYPE_STACK: 8519 if (func_id != BPF_FUNC_map_peek_elem && 8520 func_id != BPF_FUNC_map_pop_elem && 8521 func_id != BPF_FUNC_map_push_elem) 8522 goto error; 8523 break; 8524 case BPF_MAP_TYPE_SK_STORAGE: 8525 if (func_id != BPF_FUNC_sk_storage_get && 8526 func_id != BPF_FUNC_sk_storage_delete && 8527 func_id != BPF_FUNC_kptr_xchg) 8528 goto error; 8529 break; 8530 case BPF_MAP_TYPE_INODE_STORAGE: 8531 if (func_id != BPF_FUNC_inode_storage_get && 8532 func_id != BPF_FUNC_inode_storage_delete && 8533 func_id != BPF_FUNC_kptr_xchg) 8534 goto error; 8535 break; 8536 case BPF_MAP_TYPE_TASK_STORAGE: 8537 if (func_id != BPF_FUNC_task_storage_get && 8538 func_id != BPF_FUNC_task_storage_delete && 8539 func_id != BPF_FUNC_kptr_xchg) 8540 goto error; 8541 break; 8542 case BPF_MAP_TYPE_CGRP_STORAGE: 8543 if (func_id != BPF_FUNC_cgrp_storage_get && 8544 func_id != BPF_FUNC_cgrp_storage_delete && 8545 func_id != BPF_FUNC_kptr_xchg) 8546 goto error; 8547 break; 8548 case BPF_MAP_TYPE_BLOOM_FILTER: 8549 if (func_id != BPF_FUNC_map_peek_elem && 8550 func_id != BPF_FUNC_map_push_elem) 8551 goto error; 8552 break; 8553 default: 8554 break; 8555 } 8556 8557 /* ... and second from the function itself. 
*/ 8558 switch (func_id) { 8559 case BPF_FUNC_tail_call: 8560 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 8561 goto error; 8562 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 8563 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 8564 return -EINVAL; 8565 } 8566 break; 8567 case BPF_FUNC_perf_event_read: 8568 case BPF_FUNC_perf_event_output: 8569 case BPF_FUNC_perf_event_read_value: 8570 case BPF_FUNC_skb_output: 8571 case BPF_FUNC_xdp_output: 8572 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 8573 goto error; 8574 break; 8575 case BPF_FUNC_ringbuf_output: 8576 case BPF_FUNC_ringbuf_reserve: 8577 case BPF_FUNC_ringbuf_query: 8578 case BPF_FUNC_ringbuf_reserve_dynptr: 8579 case BPF_FUNC_ringbuf_submit_dynptr: 8580 case BPF_FUNC_ringbuf_discard_dynptr: 8581 if (map->map_type != BPF_MAP_TYPE_RINGBUF) 8582 goto error; 8583 break; 8584 case BPF_FUNC_user_ringbuf_drain: 8585 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) 8586 goto error; 8587 break; 8588 case BPF_FUNC_get_stackid: 8589 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 8590 goto error; 8591 break; 8592 case BPF_FUNC_current_task_under_cgroup: 8593 case BPF_FUNC_skb_under_cgroup: 8594 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 8595 goto error; 8596 break; 8597 case BPF_FUNC_redirect_map: 8598 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 8599 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 8600 map->map_type != BPF_MAP_TYPE_CPUMAP && 8601 map->map_type != BPF_MAP_TYPE_XSKMAP) 8602 goto error; 8603 break; 8604 case BPF_FUNC_sk_redirect_map: 8605 case BPF_FUNC_msg_redirect_map: 8606 case BPF_FUNC_sock_map_update: 8607 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 8608 goto error; 8609 break; 8610 case BPF_FUNC_sk_redirect_hash: 8611 case BPF_FUNC_msg_redirect_hash: 8612 case BPF_FUNC_sock_hash_update: 8613 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 8614 goto error; 8615 break; 8616 case BPF_FUNC_get_local_storage: 8617 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 8618 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 8619 goto error; 8620 break; 8621 case BPF_FUNC_sk_select_reuseport: 8622 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 8623 map->map_type != BPF_MAP_TYPE_SOCKMAP && 8624 map->map_type != BPF_MAP_TYPE_SOCKHASH) 8625 goto error; 8626 break; 8627 case BPF_FUNC_map_pop_elem: 8628 if (map->map_type != BPF_MAP_TYPE_QUEUE && 8629 map->map_type != BPF_MAP_TYPE_STACK) 8630 goto error; 8631 break; 8632 case BPF_FUNC_map_peek_elem: 8633 case BPF_FUNC_map_push_elem: 8634 if (map->map_type != BPF_MAP_TYPE_QUEUE && 8635 map->map_type != BPF_MAP_TYPE_STACK && 8636 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) 8637 goto error; 8638 break; 8639 case BPF_FUNC_map_lookup_percpu_elem: 8640 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 8641 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 8642 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) 8643 goto error; 8644 break; 8645 case BPF_FUNC_sk_storage_get: 8646 case BPF_FUNC_sk_storage_delete: 8647 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 8648 goto error; 8649 break; 8650 case BPF_FUNC_inode_storage_get: 8651 case BPF_FUNC_inode_storage_delete: 8652 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 8653 goto error; 8654 break; 8655 case BPF_FUNC_task_storage_get: 8656 case BPF_FUNC_task_storage_delete: 8657 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 8658 goto error; 8659 break; 8660 case BPF_FUNC_cgrp_storage_get: 8661 case BPF_FUNC_cgrp_storage_delete: 8662 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) 
8663 goto error; 8664 break; 8665 default: 8666 break; 8667 } 8668 8669 return 0; 8670 error: 8671 verbose(env, "cannot pass map_type %d into func %s#%d\n", 8672 map->map_type, func_id_name(func_id), func_id); 8673 return -EINVAL; 8674 } 8675 8676 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 8677 { 8678 int count = 0; 8679 8680 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 8681 count++; 8682 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 8683 count++; 8684 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 8685 count++; 8686 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 8687 count++; 8688 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 8689 count++; 8690 8691 /* We only support one arg being in raw mode at the moment, 8692 * which is sufficient for the helper functions we have 8693 * right now. 8694 */ 8695 return count <= 1; 8696 } 8697 8698 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg) 8699 { 8700 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; 8701 bool has_size = fn->arg_size[arg] != 0; 8702 bool is_next_size = false; 8703 8704 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) 8705 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); 8706 8707 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) 8708 return is_next_size; 8709 8710 return has_size == is_next_size || is_next_size == is_fixed; 8711 } 8712 8713 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 8714 { 8715 /* bpf_xxx(..., buf, len) call will access 'len' 8716 * bytes from memory 'buf'. Both arg types need 8717 * to be paired, so make sure there's no buggy 8718 * helper function specification. 8719 */ 8720 if (arg_type_is_mem_size(fn->arg1_type) || 8721 check_args_pair_invalid(fn, 0) || 8722 check_args_pair_invalid(fn, 1) || 8723 check_args_pair_invalid(fn, 2) || 8724 check_args_pair_invalid(fn, 3) || 8725 check_args_pair_invalid(fn, 4)) 8726 return false; 8727 8728 return true; 8729 } 8730 8731 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 8732 { 8733 int i; 8734 8735 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 8736 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) 8737 return !!fn->arg_btf_id[i]; 8738 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) 8739 return fn->arg_btf_id[i] == BPF_PTR_POISON; 8740 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && 8741 /* arg_btf_id and arg_size are in a union. */ 8742 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || 8743 !(fn->arg_type[i] & MEM_FIXED_SIZE))) 8744 return false; 8745 } 8746 8747 return true; 8748 } 8749 8750 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 8751 { 8752 return check_raw_mode_ok(fn) && 8753 check_arg_pair_ok(fn) && 8754 check_btf_id_ok(fn) ? 0 : -EINVAL; 8755 } 8756 8757 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 8758 * are now invalid, so turn them into unknown SCALAR_VALUE. 8759 * 8760 * This also applies to dynptr slices belonging to skb and xdp dynptrs, 8761 * since these slices point to packet data. 
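 *
 * A typical (hypothetical) sequence that triggers this invalidation:
 *
 *   void *data = (void *)(long)skb->data;        // PTR_TO_PACKET
 *   ... bounds check against skb->data_end ...
 *   bpf_skb_pull_data(skb, 64);                  // may move packet memory
 *   ... 'data' is marked invalid here and must be re-derived from
 *       skb->data and re-checked before any further packet access ...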
8762 */ 8763 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 8764 { 8765 struct bpf_func_state *state; 8766 struct bpf_reg_state *reg; 8767 8768 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 8769 if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg)) 8770 mark_reg_invalid(env, reg); 8771 })); 8772 } 8773 8774 enum { 8775 AT_PKT_END = -1, 8776 BEYOND_PKT_END = -2, 8777 }; 8778 8779 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 8780 { 8781 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 8782 struct bpf_reg_state *reg = &state->regs[regn]; 8783 8784 if (reg->type != PTR_TO_PACKET) 8785 /* PTR_TO_PACKET_META is not supported yet */ 8786 return; 8787 8788 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 8789 * How far beyond pkt_end it goes is unknown. 8790 * if (!range_open) it's the case of pkt >= pkt_end 8791 * if (range_open) it's the case of pkt > pkt_end 8792 * hence this pointer is at least 1 byte bigger than pkt_end 8793 */ 8794 if (range_open) 8795 reg->range = BEYOND_PKT_END; 8796 else 8797 reg->range = AT_PKT_END; 8798 } 8799 8800 /* The pointer with the specified id has released its reference to kernel 8801 * resources. Identify all copies of the same pointer and clear the reference. 8802 */ 8803 static int release_reference(struct bpf_verifier_env *env, 8804 int ref_obj_id) 8805 { 8806 struct bpf_func_state *state; 8807 struct bpf_reg_state *reg; 8808 int err; 8809 8810 err = release_reference_state(cur_func(env), ref_obj_id); 8811 if (err) 8812 return err; 8813 8814 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 8815 if (reg->ref_obj_id == ref_obj_id) 8816 mark_reg_invalid(env, reg); 8817 })); 8818 8819 return 0; 8820 } 8821 8822 static void invalidate_non_owning_refs(struct bpf_verifier_env *env) 8823 { 8824 struct bpf_func_state *unused; 8825 struct bpf_reg_state *reg; 8826 8827 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ 8828 if (type_is_non_owning_ref(reg->type)) 8829 mark_reg_invalid(env, reg); 8830 })); 8831 } 8832 8833 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 8834 struct bpf_reg_state *regs) 8835 { 8836 int i; 8837 8838 /* after the call registers r0 - r5 were scratched */ 8839 for (i = 0; i < CALLER_SAVED_REGS; i++) { 8840 mark_reg_not_init(env, regs, caller_saved[i]); 8841 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 8842 } 8843 } 8844 8845 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, 8846 struct bpf_func_state *caller, 8847 struct bpf_func_state *callee, 8848 int insn_idx); 8849 8850 static int set_callee_state(struct bpf_verifier_env *env, 8851 struct bpf_func_state *caller, 8852 struct bpf_func_state *callee, int insn_idx); 8853 8854 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 8855 int *insn_idx, int subprog, 8856 set_callee_state_fn set_callee_state_cb) 8857 { 8858 struct bpf_verifier_state *state = env->cur_state; 8859 struct bpf_func_state *caller, *callee; 8860 int err; 8861 8862 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 8863 verbose(env, "the call stack of %d frames is too deep\n", 8864 state->curframe + 2); 8865 return -E2BIG; 8866 } 8867 8868 caller = state->frame[state->curframe]; 8869 if (state->frame[state->curframe + 1]) { 8870 verbose(env, "verifier bug. 
Frame %d already allocated\n", 8871 state->curframe + 1); 8872 return -EFAULT; 8873 } 8874 8875 err = btf_check_subprog_call(env, subprog, caller->regs); 8876 if (err == -EFAULT) 8877 return err; 8878 if (subprog_is_global(env, subprog)) { 8879 if (err) { 8880 verbose(env, "Caller passes invalid args into func#%d\n", 8881 subprog); 8882 return err; 8883 } else { 8884 if (env->log.level & BPF_LOG_LEVEL) 8885 verbose(env, 8886 "Func#%d is global and valid. Skipping.\n", 8887 subprog); 8888 clear_caller_saved_regs(env, caller->regs); 8889 8890 /* All global functions return a 64-bit SCALAR_VALUE */ 8891 mark_reg_unknown(env, caller->regs, BPF_REG_0); 8892 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 8893 8894 /* continue with next insn after call */ 8895 return 0; 8896 } 8897 } 8898 8899 /* set_callee_state is used for direct subprog calls, but we are 8900 * interested in validating only BPF helpers that can call subprogs as 8901 * callbacks 8902 */ 8903 if (set_callee_state_cb != set_callee_state) { 8904 if (bpf_pseudo_kfunc_call(insn) && 8905 !is_callback_calling_kfunc(insn->imm)) { 8906 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", 8907 func_id_name(insn->imm), insn->imm); 8908 return -EFAULT; 8909 } else if (!bpf_pseudo_kfunc_call(insn) && 8910 !is_callback_calling_function(insn->imm)) { /* helper */ 8911 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", 8912 func_id_name(insn->imm), insn->imm); 8913 return -EFAULT; 8914 } 8915 } 8916 8917 if (insn->code == (BPF_JMP | BPF_CALL) && 8918 insn->src_reg == 0 && 8919 insn->imm == BPF_FUNC_timer_set_callback) { 8920 struct bpf_verifier_state *async_cb; 8921 8922 /* there is no real recursion here. timer callbacks are async */ 8923 env->subprog_info[subprog].is_async_cb = true; 8924 async_cb = push_async_cb(env, env->subprog_info[subprog].start, 8925 *insn_idx, subprog); 8926 if (!async_cb) 8927 return -EFAULT; 8928 callee = async_cb->frame[0]; 8929 callee->async_entry_cnt = caller->async_entry_cnt + 1; 8930 8931 /* Convert bpf_timer_set_callback() args into timer callback args */ 8932 err = set_callee_state_cb(env, caller, callee, *insn_idx); 8933 if (err) 8934 return err; 8935 8936 clear_caller_saved_regs(env, caller->regs); 8937 mark_reg_unknown(env, caller->regs, BPF_REG_0); 8938 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 8939 /* continue with next insn after call */ 8940 return 0; 8941 } 8942 8943 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 8944 if (!callee) 8945 return -ENOMEM; 8946 state->frame[state->curframe + 1] = callee; 8947 8948 /* callee cannot access r0, r6 - r9 for reading and has to write 8949 * into its own stack before reading from it. 
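 * (r1 - r5 themselves are filled in below by set_callee_state() or by the
 * callback-specific set_callee_state_cb)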
8950 * callee can read/write into caller's stack 8951 */ 8952 init_func_state(env, callee, 8953 /* remember the callsite, it will be used by bpf_exit */ 8954 *insn_idx /* callsite */, 8955 state->curframe + 1 /* frameno within this callchain */, 8956 subprog /* subprog number within this prog */); 8957 8958 /* Transfer references to the callee */ 8959 err = copy_reference_state(callee, caller); 8960 if (err) 8961 goto err_out; 8962 8963 err = set_callee_state_cb(env, caller, callee, *insn_idx); 8964 if (err) 8965 goto err_out; 8966 8967 clear_caller_saved_regs(env, caller->regs); 8968 8969 /* only increment it after check_reg_arg() finished */ 8970 state->curframe++; 8971 8972 /* and go analyze first insn of the callee */ 8973 *insn_idx = env->subprog_info[subprog].start - 1; 8974 8975 if (env->log.level & BPF_LOG_LEVEL) { 8976 verbose(env, "caller:\n"); 8977 print_verifier_state(env, caller, true); 8978 verbose(env, "callee:\n"); 8979 print_verifier_state(env, callee, true); 8980 } 8981 return 0; 8982 8983 err_out: 8984 free_func_state(callee); 8985 state->frame[state->curframe + 1] = NULL; 8986 return err; 8987 } 8988 8989 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 8990 struct bpf_func_state *caller, 8991 struct bpf_func_state *callee) 8992 { 8993 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, 8994 * void *callback_ctx, u64 flags); 8995 * callback_fn(struct bpf_map *map, void *key, void *value, 8996 * void *callback_ctx); 8997 */ 8998 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 8999 9000 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 9001 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9002 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; 9003 9004 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 9005 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 9006 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; 9007 9008 /* pointer to stack or null */ 9009 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; 9010 9011 /* unused */ 9012 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9013 return 0; 9014 } 9015 9016 static int set_callee_state(struct bpf_verifier_env *env, 9017 struct bpf_func_state *caller, 9018 struct bpf_func_state *callee, int insn_idx) 9019 { 9020 int i; 9021 9022 /* copy r1 - r5 args that callee can access. The copy includes parent 9023 * pointers, which connects us up to the liveness chain 9024 */ 9025 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 9026 callee->regs[i] = caller->regs[i]; 9027 return 0; 9028 } 9029 9030 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9031 int *insn_idx) 9032 { 9033 int subprog, target_insn; 9034 9035 target_insn = *insn_idx + insn->imm + 1; 9036 subprog = find_subprog(env, target_insn); 9037 if (subprog < 0) { 9038 verbose(env, "verifier bug. 
No program starts at insn %d\n", 9039 target_insn); 9040 return -EFAULT; 9041 } 9042 9043 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); 9044 } 9045 9046 static int set_map_elem_callback_state(struct bpf_verifier_env *env, 9047 struct bpf_func_state *caller, 9048 struct bpf_func_state *callee, 9049 int insn_idx) 9050 { 9051 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; 9052 struct bpf_map *map; 9053 int err; 9054 9055 if (bpf_map_ptr_poisoned(insn_aux)) { 9056 verbose(env, "tail_call abusing map_ptr\n"); 9057 return -EINVAL; 9058 } 9059 9060 map = BPF_MAP_PTR(insn_aux->map_ptr_state); 9061 if (!map->ops->map_set_for_each_callback_args || 9062 !map->ops->map_for_each_callback) { 9063 verbose(env, "callback function not allowed for map\n"); 9064 return -ENOTSUPP; 9065 } 9066 9067 err = map->ops->map_set_for_each_callback_args(env, caller, callee); 9068 if (err) 9069 return err; 9070 9071 callee->in_callback_fn = true; 9072 callee->callback_ret_range = tnum_range(0, 1); 9073 return 0; 9074 } 9075 9076 static int set_loop_callback_state(struct bpf_verifier_env *env, 9077 struct bpf_func_state *caller, 9078 struct bpf_func_state *callee, 9079 int insn_idx) 9080 { 9081 /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, 9082 * u64 flags); 9083 * callback_fn(u32 index, void *callback_ctx); 9084 */ 9085 callee->regs[BPF_REG_1].type = SCALAR_VALUE; 9086 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 9087 9088 /* unused */ 9089 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9090 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9091 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9092 9093 callee->in_callback_fn = true; 9094 callee->callback_ret_range = tnum_range(0, 1); 9095 return 0; 9096 } 9097 9098 static int set_timer_callback_state(struct bpf_verifier_env *env, 9099 struct bpf_func_state *caller, 9100 struct bpf_func_state *callee, 9101 int insn_idx) 9102 { 9103 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; 9104 9105 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); 9106 * callback_fn(struct bpf_map *map, void *key, void *value); 9107 */ 9108 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; 9109 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); 9110 callee->regs[BPF_REG_1].map_ptr = map_ptr; 9111 9112 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 9113 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9114 callee->regs[BPF_REG_2].map_ptr = map_ptr; 9115 9116 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 9117 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 9118 callee->regs[BPF_REG_3].map_ptr = map_ptr; 9119 9120 /* unused */ 9121 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9122 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9123 callee->in_async_callback_fn = true; 9124 callee->callback_ret_range = tnum_range(0, 1); 9125 return 0; 9126 } 9127 9128 static int set_find_vma_callback_state(struct bpf_verifier_env *env, 9129 struct bpf_func_state *caller, 9130 struct bpf_func_state *callee, 9131 int insn_idx) 9132 { 9133 /* bpf_find_vma(struct task_struct *task, u64 addr, 9134 * void *callback_fn, void *callback_ctx, u64 flags) 9135 * (callback_fn)(struct task_struct *task, 9136 * struct vm_area_struct *vma, void *callback_ctx); 9137 */ 9138 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 9139 9140 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; 9141 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9142 callee->regs[BPF_REG_2].btf = btf_vmlinux; 9143 
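/* the vma argument (r2) is typed as vmlinux's struct vm_area_struct */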
callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], 9144 9145 /* pointer to stack or null */ 9146 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; 9147 9148 /* unused */ 9149 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9150 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9151 callee->in_callback_fn = true; 9152 callee->callback_ret_range = tnum_range(0, 1); 9153 return 0; 9154 } 9155 9156 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, 9157 struct bpf_func_state *caller, 9158 struct bpf_func_state *callee, 9159 int insn_idx) 9160 { 9161 /* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void 9162 * callback_ctx, u64 flags); 9163 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx); 9164 */ 9165 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); 9166 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); 9167 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 9168 9169 /* unused */ 9170 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9171 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9172 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9173 9174 callee->in_callback_fn = true; 9175 callee->callback_ret_range = tnum_range(0, 1); 9176 return 0; 9177 } 9178 9179 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, 9180 struct bpf_func_state *caller, 9181 struct bpf_func_state *callee, 9182 int insn_idx) 9183 { 9184 /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 9185 * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)); 9186 * 9187 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset 9188 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd 9189 * by this point, so look at 'root' 9190 */ 9191 struct btf_field *field; 9192 9193 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, 9194 BPF_RB_ROOT); 9195 if (!field || !field->graph_root.value_btf_id) 9196 return -EFAULT; 9197 9198 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); 9199 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); 9200 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); 9201 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); 9202 9203 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9204 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9205 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9206 callee->in_callback_fn = true; 9207 callee->callback_ret_range = tnum_range(0, 1); 9208 return 0; 9209 } 9210 9211 static bool is_rbtree_lock_required_kfunc(u32 btf_id); 9212 9213 /* Are we currently verifying the callback for a rbtree helper that must 9214 * be called with lock held? 
If so, no need to complain about unreleased 9215 * lock 9216 */ 9217 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) 9218 { 9219 struct bpf_verifier_state *state = env->cur_state; 9220 struct bpf_insn *insn = env->prog->insnsi; 9221 struct bpf_func_state *callee; 9222 int kfunc_btf_id; 9223 9224 if (!state->curframe) 9225 return false; 9226 9227 callee = state->frame[state->curframe]; 9228 9229 if (!callee->in_callback_fn) 9230 return false; 9231 9232 kfunc_btf_id = insn[callee->callsite].imm; 9233 return is_rbtree_lock_required_kfunc(kfunc_btf_id); 9234 } 9235 9236 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 9237 { 9238 struct bpf_verifier_state *state = env->cur_state; 9239 struct bpf_func_state *caller, *callee; 9240 struct bpf_reg_state *r0; 9241 int err; 9242 9243 callee = state->frame[state->curframe]; 9244 r0 = &callee->regs[BPF_REG_0]; 9245 if (r0->type == PTR_TO_STACK) { 9246 /* technically it's ok to return caller's stack pointer 9247 * (or caller's caller's pointer) back to the caller, 9248 * since these pointers are valid. Only current stack 9249 * pointer will be invalid as soon as function exits, 9250 * but let's be conservative 9251 */ 9252 verbose(env, "cannot return stack pointer to the caller\n"); 9253 return -EINVAL; 9254 } 9255 9256 caller = state->frame[state->curframe - 1]; 9257 if (callee->in_callback_fn) { 9258 /* enforce R0 return value range [0, 1]. */ 9259 struct tnum range = callee->callback_ret_range; 9260 9261 if (r0->type != SCALAR_VALUE) { 9262 verbose(env, "R0 not a scalar value\n"); 9263 return -EACCES; 9264 } 9265 if (!tnum_in(range, r0->var_off)) { 9266 verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); 9267 return -EINVAL; 9268 } 9269 } else { 9270 /* return to the caller whatever r0 had in the callee */ 9271 caller->regs[BPF_REG_0] = *r0; 9272 } 9273 9274 /* callback_fn frame should have released its own additions to parent's 9275 * reference state at this point, or check_reference_leak would 9276 * complain, hence it must be the same as the caller. There is no need 9277 * to copy it back. 
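 * (references that the callback itself acquired are tracked via
 * refs[i].callback_ref and any still outstanding ones are reported by
 * check_reference_leak())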
9278 */ 9279 if (!callee->in_callback_fn) { 9280 /* Transfer references to the caller */ 9281 err = copy_reference_state(caller, callee); 9282 if (err) 9283 return err; 9284 } 9285 9286 *insn_idx = callee->callsite + 1; 9287 if (env->log.level & BPF_LOG_LEVEL) { 9288 verbose(env, "returning from callee:\n"); 9289 print_verifier_state(env, callee, true); 9290 verbose(env, "to caller at %d:\n", *insn_idx); 9291 print_verifier_state(env, caller, true); 9292 } 9293 /* clear everything in the callee */ 9294 free_func_state(callee); 9295 state->frame[state->curframe--] = NULL; 9296 return 0; 9297 } 9298 9299 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, 9300 int func_id, 9301 struct bpf_call_arg_meta *meta) 9302 { 9303 struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; 9304 9305 if (ret_type != RET_INTEGER) 9306 return; 9307 9308 switch (func_id) { 9309 case BPF_FUNC_get_stack: 9310 case BPF_FUNC_get_task_stack: 9311 case BPF_FUNC_probe_read_str: 9312 case BPF_FUNC_probe_read_kernel_str: 9313 case BPF_FUNC_probe_read_user_str: 9314 ret_reg->smax_value = meta->msize_max_value; 9315 ret_reg->s32_max_value = meta->msize_max_value; 9316 ret_reg->smin_value = -MAX_ERRNO; 9317 ret_reg->s32_min_value = -MAX_ERRNO; 9318 reg_bounds_sync(ret_reg); 9319 break; 9320 case BPF_FUNC_get_smp_processor_id: 9321 ret_reg->umax_value = nr_cpu_ids - 1; 9322 ret_reg->u32_max_value = nr_cpu_ids - 1; 9323 ret_reg->smax_value = nr_cpu_ids - 1; 9324 ret_reg->s32_max_value = nr_cpu_ids - 1; 9325 ret_reg->umin_value = 0; 9326 ret_reg->u32_min_value = 0; 9327 ret_reg->smin_value = 0; 9328 ret_reg->s32_min_value = 0; 9329 reg_bounds_sync(ret_reg); 9330 break; 9331 } 9332 } 9333 9334 static int 9335 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 9336 int func_id, int insn_idx) 9337 { 9338 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 9339 struct bpf_map *map = meta->map_ptr; 9340 9341 if (func_id != BPF_FUNC_tail_call && 9342 func_id != BPF_FUNC_map_lookup_elem && 9343 func_id != BPF_FUNC_map_update_elem && 9344 func_id != BPF_FUNC_map_delete_elem && 9345 func_id != BPF_FUNC_map_push_elem && 9346 func_id != BPF_FUNC_map_pop_elem && 9347 func_id != BPF_FUNC_map_peek_elem && 9348 func_id != BPF_FUNC_for_each_map_elem && 9349 func_id != BPF_FUNC_redirect_map && 9350 func_id != BPF_FUNC_map_lookup_percpu_elem) 9351 return 0; 9352 9353 if (map == NULL) { 9354 verbose(env, "kernel subsystem misconfigured verifier\n"); 9355 return -EINVAL; 9356 } 9357 9358 /* In case of read-only, some additional restrictions 9359 * need to be applied in order to prevent altering the 9360 * state of the map from program side. 
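 *
 * Sketch of the effect (illustrative): for a map created with
 * BPF_F_RDONLY_PROG, bpf_map_lookup_elem() is still allowed, but a
 * bpf_map_update_elem()/bpf_map_delete_elem() call against it is rejected
 * right below with -EACCES ("write into map forbidden").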
9361 */ 9362 if ((map->map_flags & BPF_F_RDONLY_PROG) && 9363 (func_id == BPF_FUNC_map_delete_elem || 9364 func_id == BPF_FUNC_map_update_elem || 9365 func_id == BPF_FUNC_map_push_elem || 9366 func_id == BPF_FUNC_map_pop_elem)) { 9367 verbose(env, "write into map forbidden\n"); 9368 return -EACCES; 9369 } 9370 9371 if (!BPF_MAP_PTR(aux->map_ptr_state)) 9372 bpf_map_ptr_store(aux, meta->map_ptr, 9373 !meta->map_ptr->bypass_spec_v1); 9374 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) 9375 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, 9376 !meta->map_ptr->bypass_spec_v1); 9377 return 0; 9378 } 9379 9380 static int 9381 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 9382 int func_id, int insn_idx) 9383 { 9384 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 9385 struct bpf_reg_state *regs = cur_regs(env), *reg; 9386 struct bpf_map *map = meta->map_ptr; 9387 u64 val, max; 9388 int err; 9389 9390 if (func_id != BPF_FUNC_tail_call) 9391 return 0; 9392 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { 9393 verbose(env, "kernel subsystem misconfigured verifier\n"); 9394 return -EINVAL; 9395 } 9396 9397 reg = ®s[BPF_REG_3]; 9398 val = reg->var_off.value; 9399 max = map->max_entries; 9400 9401 if (!(register_is_const(reg) && val < max)) { 9402 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 9403 return 0; 9404 } 9405 9406 err = mark_chain_precision(env, BPF_REG_3); 9407 if (err) 9408 return err; 9409 if (bpf_map_key_unseen(aux)) 9410 bpf_map_key_store(aux, val); 9411 else if (!bpf_map_key_poisoned(aux) && 9412 bpf_map_key_immediate(aux) != val) 9413 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 9414 return 0; 9415 } 9416 9417 static int check_reference_leak(struct bpf_verifier_env *env) 9418 { 9419 struct bpf_func_state *state = cur_func(env); 9420 bool refs_lingering = false; 9421 int i; 9422 9423 if (state->frameno && !state->in_callback_fn) 9424 return 0; 9425 9426 for (i = 0; i < state->acquired_refs; i++) { 9427 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) 9428 continue; 9429 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", 9430 state->refs[i].id, state->refs[i].insn_idx); 9431 refs_lingering = true; 9432 } 9433 return refs_lingering ? -EINVAL : 0; 9434 } 9435 9436 static int check_bpf_snprintf_call(struct bpf_verifier_env *env, 9437 struct bpf_reg_state *regs) 9438 { 9439 struct bpf_reg_state *fmt_reg = ®s[BPF_REG_3]; 9440 struct bpf_reg_state *data_len_reg = ®s[BPF_REG_5]; 9441 struct bpf_map *fmt_map = fmt_reg->map_ptr; 9442 struct bpf_bprintf_data data = {}; 9443 int err, fmt_map_off, num_args; 9444 u64 fmt_addr; 9445 char *fmt; 9446 9447 /* data must be an array of u64 */ 9448 if (data_len_reg->var_off.value % 8) 9449 return -EINVAL; 9450 num_args = data_len_reg->var_off.value / 8; 9451 9452 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const 9453 * and map_direct_value_addr is set. 9454 */ 9455 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; 9456 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, 9457 fmt_map_off); 9458 if (err) { 9459 verbose(env, "verifier bug\n"); 9460 return -EFAULT; 9461 } 9462 fmt = (char *)(long)fmt_addr + fmt_map_off; 9463 9464 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we 9465 * can focus on validating the format specifiers. 
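 *
 * A typical call being validated here looks like (illustrative only):
 *   u64 args[] = { pid };
 *   bpf_snprintf(out, sizeof(out), "pid=%d\n", args, sizeof(args));
 * with the format string living in a read-only map and 'args' being an
 * array of u64 values, which is why data_len must be a multiple of 8
 * (checked above).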
9466 */ 9467 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data); 9468 if (err < 0) 9469 verbose(env, "Invalid format string\n"); 9470 9471 return err; 9472 } 9473 9474 static int check_get_func_ip(struct bpf_verifier_env *env) 9475 { 9476 enum bpf_prog_type type = resolve_prog_type(env->prog); 9477 int func_id = BPF_FUNC_get_func_ip; 9478 9479 if (type == BPF_PROG_TYPE_TRACING) { 9480 if (!bpf_prog_has_trampoline(env->prog)) { 9481 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", 9482 func_id_name(func_id), func_id); 9483 return -ENOTSUPP; 9484 } 9485 return 0; 9486 } else if (type == BPF_PROG_TYPE_KPROBE) { 9487 return 0; 9488 } 9489 9490 verbose(env, "func %s#%d not supported for program type %d\n", 9491 func_id_name(func_id), func_id, type); 9492 return -ENOTSUPP; 9493 } 9494 9495 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 9496 { 9497 return &env->insn_aux_data[env->insn_idx]; 9498 } 9499 9500 static bool loop_flag_is_zero(struct bpf_verifier_env *env) 9501 { 9502 struct bpf_reg_state *regs = cur_regs(env); 9503 struct bpf_reg_state *reg = ®s[BPF_REG_4]; 9504 bool reg_is_null = register_is_null(reg); 9505 9506 if (reg_is_null) 9507 mark_chain_precision(env, BPF_REG_4); 9508 9509 return reg_is_null; 9510 } 9511 9512 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno) 9513 { 9514 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; 9515 9516 if (!state->initialized) { 9517 state->initialized = 1; 9518 state->fit_for_inline = loop_flag_is_zero(env); 9519 state->callback_subprogno = subprogno; 9520 return; 9521 } 9522 9523 if (!state->fit_for_inline) 9524 return; 9525 9526 state->fit_for_inline = (loop_flag_is_zero(env) && 9527 state->callback_subprogno == subprogno); 9528 } 9529 9530 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9531 int *insn_idx_p) 9532 { 9533 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 9534 const struct bpf_func_proto *fn = NULL; 9535 enum bpf_return_type ret_type; 9536 enum bpf_type_flag ret_flag; 9537 struct bpf_reg_state *regs; 9538 struct bpf_call_arg_meta meta; 9539 int insn_idx = *insn_idx_p; 9540 bool changes_data; 9541 int i, err, func_id; 9542 9543 /* find function prototype */ 9544 func_id = insn->imm; 9545 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 9546 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 9547 func_id); 9548 return -EINVAL; 9549 } 9550 9551 if (env->ops->get_func_proto) 9552 fn = env->ops->get_func_proto(func_id, env->prog); 9553 if (!fn) { 9554 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 9555 func_id); 9556 return -EINVAL; 9557 } 9558 9559 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 9560 if (!env->prog->gpl_compatible && fn->gpl_only) { 9561 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 9562 return -EINVAL; 9563 } 9564 9565 if (fn->allowed && !fn->allowed(env->prog)) { 9566 verbose(env, "helper call is not allowed in probe\n"); 9567 return -EINVAL; 9568 } 9569 9570 if (!env->prog->aux->sleepable && fn->might_sleep) { 9571 verbose(env, "helper call might sleep in a non-sleepable prog\n"); 9572 return -EINVAL; 9573 } 9574 9575 /* With LD_ABS/IND some JITs save/restore skb from r1. 
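 * Helpers that may move packet data must therefore take the program context
 * as their first argument; the check just below rejects helper protos where
 * that is not the case.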
*/ 9576 changes_data = bpf_helper_changes_pkt_data(fn->func); 9577 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 9578 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 9579 func_id_name(func_id), func_id); 9580 return -EINVAL; 9581 } 9582 9583 memset(&meta, 0, sizeof(meta)); 9584 meta.pkt_access = fn->pkt_access; 9585 9586 err = check_func_proto(fn, func_id); 9587 if (err) { 9588 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 9589 func_id_name(func_id), func_id); 9590 return err; 9591 } 9592 9593 if (env->cur_state->active_rcu_lock) { 9594 if (fn->might_sleep) { 9595 verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n", 9596 func_id_name(func_id), func_id); 9597 return -EINVAL; 9598 } 9599 9600 if (env->prog->aux->sleepable && is_storage_get_function(func_id)) 9601 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; 9602 } 9603 9604 meta.func_id = func_id; 9605 /* check args */ 9606 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { 9607 err = check_func_arg(env, i, &meta, fn, insn_idx); 9608 if (err) 9609 return err; 9610 } 9611 9612 err = record_func_map(env, &meta, func_id, insn_idx); 9613 if (err) 9614 return err; 9615 9616 err = record_func_key(env, &meta, func_id, insn_idx); 9617 if (err) 9618 return err; 9619 9620 /* Mark slots with STACK_MISC in case of raw mode, stack offset 9621 * is inferred from register state. 9622 */ 9623 for (i = 0; i < meta.access_size; i++) { 9624 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 9625 BPF_WRITE, -1, false, false); 9626 if (err) 9627 return err; 9628 } 9629 9630 regs = cur_regs(env); 9631 9632 if (meta.release_regno) { 9633 err = -EINVAL; 9634 /* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot 9635 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr 9636 * is safe to do directly. 9637 */ 9638 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { 9639 if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) { 9640 verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n"); 9641 return -EFAULT; 9642 } 9643 err = unmark_stack_slots_dynptr(env, ®s[meta.release_regno]); 9644 } else if (meta.ref_obj_id) { 9645 err = release_reference(env, meta.ref_obj_id); 9646 } else if (register_is_null(®s[meta.release_regno])) { 9647 /* meta.ref_obj_id can only be 0 if register that is meant to be 9648 * released is NULL, which must be > R0. 9649 */ 9650 err = 0; 9651 } 9652 if (err) { 9653 verbose(env, "func %s#%d reference has not been acquired before\n", 9654 func_id_name(func_id), func_id); 9655 return err; 9656 } 9657 } 9658 9659 switch (func_id) { 9660 case BPF_FUNC_tail_call: 9661 err = check_reference_leak(env); 9662 if (err) { 9663 verbose(env, "tail_call would lead to reference leak\n"); 9664 return err; 9665 } 9666 break; 9667 case BPF_FUNC_get_local_storage: 9668 /* check that flags argument in get_local_storage(map, flags) is 0, 9669 * this is required because get_local_storage() can't return an error. 
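 * Only calls of the form (illustrative) x = bpf_get_local_storage(&map, 0);
 * are accepted, i.e. R2 must be a known constant zero.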
9670 */ 9671 if (!register_is_null(®s[BPF_REG_2])) { 9672 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); 9673 return -EINVAL; 9674 } 9675 break; 9676 case BPF_FUNC_for_each_map_elem: 9677 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 9678 set_map_elem_callback_state); 9679 break; 9680 case BPF_FUNC_timer_set_callback: 9681 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 9682 set_timer_callback_state); 9683 break; 9684 case BPF_FUNC_find_vma: 9685 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 9686 set_find_vma_callback_state); 9687 break; 9688 case BPF_FUNC_snprintf: 9689 err = check_bpf_snprintf_call(env, regs); 9690 break; 9691 case BPF_FUNC_loop: 9692 update_loop_inline_state(env, meta.subprogno); 9693 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 9694 set_loop_callback_state); 9695 break; 9696 case BPF_FUNC_dynptr_from_mem: 9697 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { 9698 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n", 9699 reg_type_str(env, regs[BPF_REG_1].type)); 9700 return -EACCES; 9701 } 9702 break; 9703 case BPF_FUNC_set_retval: 9704 if (prog_type == BPF_PROG_TYPE_LSM && 9705 env->prog->expected_attach_type == BPF_LSM_CGROUP) { 9706 if (!env->prog->aux->attach_func_proto->type) { 9707 /* Make sure programs that attach to void 9708 * hooks don't try to modify return value. 9709 */ 9710 verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 9711 return -EINVAL; 9712 } 9713 } 9714 break; 9715 case BPF_FUNC_dynptr_data: 9716 { 9717 struct bpf_reg_state *reg; 9718 int id, ref_obj_id; 9719 9720 reg = get_dynptr_arg_reg(env, fn, regs); 9721 if (!reg) 9722 return -EFAULT; 9723 9724 9725 if (meta.dynptr_id) { 9726 verbose(env, "verifier internal error: meta.dynptr_id already set\n"); 9727 return -EFAULT; 9728 } 9729 if (meta.ref_obj_id) { 9730 verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); 9731 return -EFAULT; 9732 } 9733 9734 id = dynptr_id(env, reg); 9735 if (id < 0) { 9736 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); 9737 return id; 9738 } 9739 9740 ref_obj_id = dynptr_ref_obj_id(env, reg); 9741 if (ref_obj_id < 0) { 9742 verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); 9743 return ref_obj_id; 9744 } 9745 9746 meta.dynptr_id = id; 9747 meta.ref_obj_id = ref_obj_id; 9748 9749 break; 9750 } 9751 case BPF_FUNC_dynptr_write: 9752 { 9753 enum bpf_dynptr_type dynptr_type; 9754 struct bpf_reg_state *reg; 9755 9756 reg = get_dynptr_arg_reg(env, fn, regs); 9757 if (!reg) 9758 return -EFAULT; 9759 9760 dynptr_type = dynptr_get_type(env, reg); 9761 if (dynptr_type == BPF_DYNPTR_TYPE_INVALID) 9762 return -EFAULT; 9763 9764 if (dynptr_type == BPF_DYNPTR_TYPE_SKB) 9765 /* this will trigger clear_all_pkt_pointers(), which will 9766 * invalidate all dynptr slices associated with the skb 9767 */ 9768 changes_data = true; 9769 9770 break; 9771 } 9772 case BPF_FUNC_user_ringbuf_drain: 9773 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 9774 set_user_ringbuf_callback_state); 9775 break; 9776 } 9777 9778 if (err) 9779 return err; 9780 9781 /* reset caller saved regs */ 9782 for (i = 0; i < CALLER_SAVED_REGS; i++) { 9783 mark_reg_not_init(env, regs, caller_saved[i]); 9784 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 9785 } 9786 9787 /* helper call returns 64-bit value. 
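 * r0 is therefore marked DEF_NOT_SUBREG below: the 32-bit zero-extension
 * pass never needs to patch an extra zext instruction for it.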
*/ 9788 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 9789 9790 /* update return register (already marked as written above) */ 9791 ret_type = fn->ret_type; 9792 ret_flag = type_flag(ret_type); 9793 9794 switch (base_type(ret_type)) { 9795 case RET_INTEGER: 9796 /* sets type to SCALAR_VALUE */ 9797 mark_reg_unknown(env, regs, BPF_REG_0); 9798 break; 9799 case RET_VOID: 9800 regs[BPF_REG_0].type = NOT_INIT; 9801 break; 9802 case RET_PTR_TO_MAP_VALUE: 9803 /* There is no offset yet applied, variable or fixed */ 9804 mark_reg_known_zero(env, regs, BPF_REG_0); 9805 /* remember map_ptr, so that check_map_access() 9806 * can check 'value_size' boundary of memory access 9807 * to map element returned from bpf_map_lookup_elem() 9808 */ 9809 if (meta.map_ptr == NULL) { 9810 verbose(env, 9811 "kernel subsystem misconfigured verifier\n"); 9812 return -EINVAL; 9813 } 9814 regs[BPF_REG_0].map_ptr = meta.map_ptr; 9815 regs[BPF_REG_0].map_uid = meta.map_uid; 9816 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; 9817 if (!type_may_be_null(ret_type) && 9818 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) { 9819 regs[BPF_REG_0].id = ++env->id_gen; 9820 } 9821 break; 9822 case RET_PTR_TO_SOCKET: 9823 mark_reg_known_zero(env, regs, BPF_REG_0); 9824 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; 9825 break; 9826 case RET_PTR_TO_SOCK_COMMON: 9827 mark_reg_known_zero(env, regs, BPF_REG_0); 9828 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; 9829 break; 9830 case RET_PTR_TO_TCP_SOCK: 9831 mark_reg_known_zero(env, regs, BPF_REG_0); 9832 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; 9833 break; 9834 case RET_PTR_TO_MEM: 9835 mark_reg_known_zero(env, regs, BPF_REG_0); 9836 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 9837 regs[BPF_REG_0].mem_size = meta.mem_size; 9838 break; 9839 case RET_PTR_TO_MEM_OR_BTF_ID: 9840 { 9841 const struct btf_type *t; 9842 9843 mark_reg_known_zero(env, regs, BPF_REG_0); 9844 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); 9845 if (!btf_type_is_struct(t)) { 9846 u32 tsize; 9847 const struct btf_type *ret; 9848 const char *tname; 9849 9850 /* resolve the type size of ksym. */ 9851 ret = btf_resolve_size(meta.ret_btf, t, &tsize); 9852 if (IS_ERR(ret)) { 9853 tname = btf_name_by_offset(meta.ret_btf, t->name_off); 9854 verbose(env, "unable to resolve the size of type '%s': %ld\n", 9855 tname, PTR_ERR(ret)); 9856 return -EINVAL; 9857 } 9858 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 9859 regs[BPF_REG_0].mem_size = tsize; 9860 } else { 9861 /* MEM_RDONLY may be carried from ret_flag, but it 9862 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise 9863 * it will confuse the check of PTR_TO_BTF_ID in 9864 * check_mem_access(). 
9865 */ 9866 ret_flag &= ~MEM_RDONLY; 9867 9868 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 9869 regs[BPF_REG_0].btf = meta.ret_btf; 9870 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 9871 } 9872 break; 9873 } 9874 case RET_PTR_TO_BTF_ID: 9875 { 9876 struct btf *ret_btf; 9877 int ret_btf_id; 9878 9879 mark_reg_known_zero(env, regs, BPF_REG_0); 9880 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 9881 if (func_id == BPF_FUNC_kptr_xchg) { 9882 ret_btf = meta.kptr_field->kptr.btf; 9883 ret_btf_id = meta.kptr_field->kptr.btf_id; 9884 if (!btf_is_kernel(ret_btf)) 9885 regs[BPF_REG_0].type |= MEM_ALLOC; 9886 } else { 9887 if (fn->ret_btf_id == BPF_PTR_POISON) { 9888 verbose(env, "verifier internal error:"); 9889 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", 9890 func_id_name(func_id)); 9891 return -EINVAL; 9892 } 9893 ret_btf = btf_vmlinux; 9894 ret_btf_id = *fn->ret_btf_id; 9895 } 9896 if (ret_btf_id == 0) { 9897 verbose(env, "invalid return type %u of func %s#%d\n", 9898 base_type(ret_type), func_id_name(func_id), 9899 func_id); 9900 return -EINVAL; 9901 } 9902 regs[BPF_REG_0].btf = ret_btf; 9903 regs[BPF_REG_0].btf_id = ret_btf_id; 9904 break; 9905 } 9906 default: 9907 verbose(env, "unknown return type %u of func %s#%d\n", 9908 base_type(ret_type), func_id_name(func_id), func_id); 9909 return -EINVAL; 9910 } 9911 9912 if (type_may_be_null(regs[BPF_REG_0].type)) 9913 regs[BPF_REG_0].id = ++env->id_gen; 9914 9915 if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) { 9916 verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n", 9917 func_id_name(func_id), func_id); 9918 return -EFAULT; 9919 } 9920 9921 if (is_dynptr_ref_function(func_id)) 9922 regs[BPF_REG_0].dynptr_id = meta.dynptr_id; 9923 9924 if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) { 9925 /* For release_reference() */ 9926 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 9927 } else if (is_acquire_function(func_id, meta.map_ptr)) { 9928 int id = acquire_reference_state(env, insn_idx); 9929 9930 if (id < 0) 9931 return id; 9932 /* For mark_ptr_or_null_reg() */ 9933 regs[BPF_REG_0].id = id; 9934 /* For release_reference() */ 9935 regs[BPF_REG_0].ref_obj_id = id; 9936 } 9937 9938 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 9939 9940 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 9941 if (err) 9942 return err; 9943 9944 if ((func_id == BPF_FUNC_get_stack || 9945 func_id == BPF_FUNC_get_task_stack) && 9946 !env->prog->has_callchain_buf) { 9947 const char *err_str; 9948 9949 #ifdef CONFIG_PERF_EVENTS 9950 err = get_callchain_buffers(sysctl_perf_event_max_stack); 9951 err_str = "cannot get callchain buffer for func %s#%d\n"; 9952 #else 9953 err = -ENOTSUPP; 9954 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 9955 #endif 9956 if (err) { 9957 verbose(env, err_str, func_id_name(func_id), func_id); 9958 return err; 9959 } 9960 9961 env->prog->has_callchain_buf = true; 9962 } 9963 9964 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 9965 env->prog->call_get_stack = true; 9966 9967 if (func_id == BPF_FUNC_get_func_ip) { 9968 if (check_get_func_ip(env)) 9969 return -ENOTSUPP; 9970 env->prog->call_get_func_ip = true; 9971 } 9972 9973 if (changes_data) 9974 clear_all_pkt_pointers(env); 9975 return 0; 9976 } 9977 9978 /* mark_btf_func_reg_size() is used when the reg size is determined by 9979 * the BTF func_proto's return value size and argument. 
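 *
 * Sketch (hypothetical kfunc, for illustration): for u32 foo(u64 a, int b),
 * r1 is read-marked as a full 64-bit read (REG_LIVE_READ64), r2 only as a
 * 32-bit read (REG_LIVE_READ32), and r0's subreg_def records that the call
 * defines just the lower 32 bits.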
9980 */ 9981 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, 9982 size_t reg_size) 9983 { 9984 struct bpf_reg_state *reg = &cur_regs(env)[regno]; 9985 9986 if (regno == BPF_REG_0) { 9987 /* Function return value */ 9988 reg->live |= REG_LIVE_WRITTEN; 9989 reg->subreg_def = reg_size == sizeof(u64) ? 9990 DEF_NOT_SUBREG : env->insn_idx + 1; 9991 } else { 9992 /* Function argument */ 9993 if (reg_size == sizeof(u64)) { 9994 mark_insn_zext(env, reg); 9995 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 9996 } else { 9997 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); 9998 } 9999 } 10000 } 10001 10002 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta) 10003 { 10004 return meta->kfunc_flags & KF_ACQUIRE; 10005 } 10006 10007 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta) 10008 { 10009 return meta->kfunc_flags & KF_RELEASE; 10010 } 10011 10012 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta) 10013 { 10014 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); 10015 } 10016 10017 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta) 10018 { 10019 return meta->kfunc_flags & KF_SLEEPABLE; 10020 } 10021 10022 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta) 10023 { 10024 return meta->kfunc_flags & KF_DESTRUCTIVE; 10025 } 10026 10027 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) 10028 { 10029 return meta->kfunc_flags & KF_RCU; 10030 } 10031 10032 static bool __kfunc_param_match_suffix(const struct btf *btf, 10033 const struct btf_param *arg, 10034 const char *suffix) 10035 { 10036 int suffix_len = strlen(suffix), len; 10037 const char *param_name; 10038 10039 /* In the future, this can be ported to use BTF tagging */ 10040 param_name = btf_name_by_offset(btf, arg->name_off); 10041 if (str_is_empty(param_name)) 10042 return false; 10043 len = strlen(param_name); 10044 if (len < suffix_len) 10045 return false; 10046 param_name += len - suffix_len; 10047 return !strncmp(param_name, suffix, suffix_len); 10048 } 10049 10050 static bool is_kfunc_arg_mem_size(const struct btf *btf, 10051 const struct btf_param *arg, 10052 const struct bpf_reg_state *reg) 10053 { 10054 const struct btf_type *t; 10055 10056 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10057 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 10058 return false; 10059 10060 return __kfunc_param_match_suffix(btf, arg, "__sz"); 10061 } 10062 10063 static bool is_kfunc_arg_const_mem_size(const struct btf *btf, 10064 const struct btf_param *arg, 10065 const struct bpf_reg_state *reg) 10066 { 10067 const struct btf_type *t; 10068 10069 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10070 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 10071 return false; 10072 10073 return __kfunc_param_match_suffix(btf, arg, "__szk"); 10074 } 10075 10076 static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg) 10077 { 10078 return __kfunc_param_match_suffix(btf, arg, "__opt"); 10079 } 10080 10081 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg) 10082 { 10083 return __kfunc_param_match_suffix(btf, arg, "__k"); 10084 } 10085 10086 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg) 10087 { 10088 return __kfunc_param_match_suffix(btf, arg, "__ign"); 10089 } 10090 10091 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg) 10092 { 10093 return 
__kfunc_param_match_suffix(btf, arg, "__alloc"); 10094 } 10095 10096 static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg) 10097 { 10098 return __kfunc_param_match_suffix(btf, arg, "__uninit"); 10099 } 10100 10101 static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) 10102 { 10103 return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); 10104 } 10105 10106 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, 10107 const struct btf_param *arg, 10108 const char *name) 10109 { 10110 int len, target_len = strlen(name); 10111 const char *param_name; 10112 10113 param_name = btf_name_by_offset(btf, arg->name_off); 10114 if (str_is_empty(param_name)) 10115 return false; 10116 len = strlen(param_name); 10117 if (len != target_len) 10118 return false; 10119 if (strcmp(param_name, name)) 10120 return false; 10121 10122 return true; 10123 } 10124 10125 enum { 10126 KF_ARG_DYNPTR_ID, 10127 KF_ARG_LIST_HEAD_ID, 10128 KF_ARG_LIST_NODE_ID, 10129 KF_ARG_RB_ROOT_ID, 10130 KF_ARG_RB_NODE_ID, 10131 }; 10132 10133 BTF_ID_LIST(kf_arg_btf_ids) 10134 BTF_ID(struct, bpf_dynptr_kern) 10135 BTF_ID(struct, bpf_list_head) 10136 BTF_ID(struct, bpf_list_node) 10137 BTF_ID(struct, bpf_rb_root) 10138 BTF_ID(struct, bpf_rb_node) 10139 10140 static bool __is_kfunc_ptr_arg_type(const struct btf *btf, 10141 const struct btf_param *arg, int type) 10142 { 10143 const struct btf_type *t; 10144 u32 res_id; 10145 10146 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10147 if (!t) 10148 return false; 10149 if (!btf_type_is_ptr(t)) 10150 return false; 10151 t = btf_type_skip_modifiers(btf, t->type, &res_id); 10152 if (!t) 10153 return false; 10154 return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]); 10155 } 10156 10157 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg) 10158 { 10159 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID); 10160 } 10161 10162 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg) 10163 { 10164 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID); 10165 } 10166 10167 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg) 10168 { 10169 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID); 10170 } 10171 10172 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg) 10173 { 10174 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID); 10175 } 10176 10177 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg) 10178 { 10179 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID); 10180 } 10181 10182 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf, 10183 const struct btf_param *arg) 10184 { 10185 const struct btf_type *t; 10186 10187 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); 10188 if (!t) 10189 return false; 10190 10191 return true; 10192 } 10193 10194 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ 10195 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, 10196 const struct btf *btf, 10197 const struct btf_type *t, int rec) 10198 { 10199 const struct btf_type *member_type; 10200 const struct btf_member *member; 10201 u32 i; 10202 10203 if (!btf_type_is_struct(t)) 10204 return false; 10205 10206 for_each_member(i, t, member) { 10207 const struct btf_array *array; 10208 10209 member_type = 
btf_type_skip_modifiers(btf, member->type, NULL); 10210 if (btf_type_is_struct(member_type)) { 10211 if (rec >= 3) { 10212 verbose(env, "max struct nesting depth exceeded\n"); 10213 return false; 10214 } 10215 if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1)) 10216 return false; 10217 continue; 10218 } 10219 if (btf_type_is_array(member_type)) { 10220 array = btf_array(member_type); 10221 if (!array->nelems) 10222 return false; 10223 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 10224 if (!btf_type_is_scalar(member_type)) 10225 return false; 10226 continue; 10227 } 10228 if (!btf_type_is_scalar(member_type)) 10229 return false; 10230 } 10231 return true; 10232 } 10233 10234 enum kfunc_ptr_arg_type { 10235 KF_ARG_PTR_TO_CTX, 10236 KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */ 10237 KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */ 10238 KF_ARG_PTR_TO_DYNPTR, 10239 KF_ARG_PTR_TO_ITER, 10240 KF_ARG_PTR_TO_LIST_HEAD, 10241 KF_ARG_PTR_TO_LIST_NODE, 10242 KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */ 10243 KF_ARG_PTR_TO_MEM, 10244 KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */ 10245 KF_ARG_PTR_TO_CALLBACK, 10246 KF_ARG_PTR_TO_RB_ROOT, 10247 KF_ARG_PTR_TO_RB_NODE, 10248 }; 10249 10250 enum special_kfunc_type { 10251 KF_bpf_obj_new_impl, 10252 KF_bpf_obj_drop_impl, 10253 KF_bpf_refcount_acquire_impl, 10254 KF_bpf_list_push_front_impl, 10255 KF_bpf_list_push_back_impl, 10256 KF_bpf_list_pop_front, 10257 KF_bpf_list_pop_back, 10258 KF_bpf_cast_to_kern_ctx, 10259 KF_bpf_rdonly_cast, 10260 KF_bpf_rcu_read_lock, 10261 KF_bpf_rcu_read_unlock, 10262 KF_bpf_rbtree_remove, 10263 KF_bpf_rbtree_add_impl, 10264 KF_bpf_rbtree_first, 10265 KF_bpf_dynptr_from_skb, 10266 KF_bpf_dynptr_from_xdp, 10267 KF_bpf_dynptr_slice, 10268 KF_bpf_dynptr_slice_rdwr, 10269 KF_bpf_dynptr_clone, 10270 }; 10271 10272 BTF_SET_START(special_kfunc_set) 10273 BTF_ID(func, bpf_obj_new_impl) 10274 BTF_ID(func, bpf_obj_drop_impl) 10275 BTF_ID(func, bpf_refcount_acquire_impl) 10276 BTF_ID(func, bpf_list_push_front_impl) 10277 BTF_ID(func, bpf_list_push_back_impl) 10278 BTF_ID(func, bpf_list_pop_front) 10279 BTF_ID(func, bpf_list_pop_back) 10280 BTF_ID(func, bpf_cast_to_kern_ctx) 10281 BTF_ID(func, bpf_rdonly_cast) 10282 BTF_ID(func, bpf_rbtree_remove) 10283 BTF_ID(func, bpf_rbtree_add_impl) 10284 BTF_ID(func, bpf_rbtree_first) 10285 BTF_ID(func, bpf_dynptr_from_skb) 10286 BTF_ID(func, bpf_dynptr_from_xdp) 10287 BTF_ID(func, bpf_dynptr_slice) 10288 BTF_ID(func, bpf_dynptr_slice_rdwr) 10289 BTF_ID(func, bpf_dynptr_clone) 10290 BTF_SET_END(special_kfunc_set) 10291 10292 BTF_ID_LIST(special_kfunc_list) 10293 BTF_ID(func, bpf_obj_new_impl) 10294 BTF_ID(func, bpf_obj_drop_impl) 10295 BTF_ID(func, bpf_refcount_acquire_impl) 10296 BTF_ID(func, bpf_list_push_front_impl) 10297 BTF_ID(func, bpf_list_push_back_impl) 10298 BTF_ID(func, bpf_list_pop_front) 10299 BTF_ID(func, bpf_list_pop_back) 10300 BTF_ID(func, bpf_cast_to_kern_ctx) 10301 BTF_ID(func, bpf_rdonly_cast) 10302 BTF_ID(func, bpf_rcu_read_lock) 10303 BTF_ID(func, bpf_rcu_read_unlock) 10304 BTF_ID(func, bpf_rbtree_remove) 10305 BTF_ID(func, bpf_rbtree_add_impl) 10306 BTF_ID(func, bpf_rbtree_first) 10307 BTF_ID(func, bpf_dynptr_from_skb) 10308 BTF_ID(func, bpf_dynptr_from_xdp) 10309 BTF_ID(func, bpf_dynptr_slice) 10310 BTF_ID(func, bpf_dynptr_slice_rdwr) 10311 BTF_ID(func, bpf_dynptr_clone) 10312 10313 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) 10314 { 10315 if (meta->func_id == 
special_kfunc_list[KF_bpf_refcount_acquire_impl] && 10316 meta->arg_owning_ref) { 10317 return false; 10318 } 10319 10320 return meta->kfunc_flags & KF_RET_NULL; 10321 } 10322 10323 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta) 10324 { 10325 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; 10326 } 10327 10328 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta) 10329 { 10330 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; 10331 } 10332 10333 static enum kfunc_ptr_arg_type 10334 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, 10335 struct bpf_kfunc_call_arg_meta *meta, 10336 const struct btf_type *t, const struct btf_type *ref_t, 10337 const char *ref_tname, const struct btf_param *args, 10338 int argno, int nargs) 10339 { 10340 u32 regno = argno + 1; 10341 struct bpf_reg_state *regs = cur_regs(env); 10342 struct bpf_reg_state *reg = ®s[regno]; 10343 bool arg_mem_size = false; 10344 10345 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) 10346 return KF_ARG_PTR_TO_CTX; 10347 10348 /* In this function, we verify the kfunc's BTF as per the argument type, 10349 * leaving the rest of the verification with respect to the register 10350 * type to our caller. When a set of conditions hold in the BTF type of 10351 * arguments, we resolve it to a known kfunc_ptr_arg_type. 10352 */ 10353 if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) 10354 return KF_ARG_PTR_TO_CTX; 10355 10356 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) 10357 return KF_ARG_PTR_TO_ALLOC_BTF_ID; 10358 10359 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) 10360 return KF_ARG_PTR_TO_REFCOUNTED_KPTR; 10361 10362 if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) 10363 return KF_ARG_PTR_TO_DYNPTR; 10364 10365 if (is_kfunc_arg_iter(meta, argno)) 10366 return KF_ARG_PTR_TO_ITER; 10367 10368 if (is_kfunc_arg_list_head(meta->btf, &args[argno])) 10369 return KF_ARG_PTR_TO_LIST_HEAD; 10370 10371 if (is_kfunc_arg_list_node(meta->btf, &args[argno])) 10372 return KF_ARG_PTR_TO_LIST_NODE; 10373 10374 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) 10375 return KF_ARG_PTR_TO_RB_ROOT; 10376 10377 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) 10378 return KF_ARG_PTR_TO_RB_NODE; 10379 10380 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { 10381 if (!btf_type_is_struct(ref_t)) { 10382 verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n", 10383 meta->func_name, argno, btf_type_str(ref_t), ref_tname); 10384 return -EINVAL; 10385 } 10386 return KF_ARG_PTR_TO_BTF_ID; 10387 } 10388 10389 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) 10390 return KF_ARG_PTR_TO_CALLBACK; 10391 10392 10393 if (argno + 1 < nargs && 10394 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]) || 10395 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]))) 10396 arg_mem_size = true; 10397 10398 /* This is the catch all argument type of register types supported by 10399 * check_helper_mem_access. However, we only allow when argument type is 10400 * pointer to scalar, or struct composed (recursively) of scalars. When 10401 * arg_mem_size is true, the pointer can be void *. 10402 */ 10403 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && 10404 (arg_mem_size ? 
!btf_type_is_void(ref_t) : 1)) { 10405 verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n", 10406 argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : ""); 10407 return -EINVAL; 10408 } 10409 return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM; 10410 } 10411 10412 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, 10413 struct bpf_reg_state *reg, 10414 const struct btf_type *ref_t, 10415 const char *ref_tname, u32 ref_id, 10416 struct bpf_kfunc_call_arg_meta *meta, 10417 int argno) 10418 { 10419 const struct btf_type *reg_ref_t; 10420 bool strict_type_match = false; 10421 const struct btf *reg_btf; 10422 const char *reg_ref_tname; 10423 u32 reg_ref_id; 10424 10425 if (base_type(reg->type) == PTR_TO_BTF_ID) { 10426 reg_btf = reg->btf; 10427 reg_ref_id = reg->btf_id; 10428 } else { 10429 reg_btf = btf_vmlinux; 10430 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; 10431 } 10432 10433 /* Enforce strict type matching for calls to kfuncs that are acquiring 10434 * or releasing a reference, or are no-cast aliases. We do _not_ 10435 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default, 10436 * as we want to enable BPF programs to pass types that are bitwise 10437 * equivalent without forcing them to explicitly cast with something 10438 * like bpf_cast_to_kern_ctx(). 10439 * 10440 * For example, say we had a type like the following: 10441 * 10442 * struct bpf_cpumask { 10443 * cpumask_t cpumask; 10444 * refcount_t usage; 10445 * }; 10446 * 10447 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed 10448 * to a struct cpumask, so it would be safe to pass a struct 10449 * bpf_cpumask * to a kfunc expecting a struct cpumask *. 10450 * 10451 * The philosophy here is similar to how we allow scalars of different 10452 * types to be passed to kfuncs as long as the size is the same. The 10453 * only difference here is that we're simply allowing 10454 * btf_struct_ids_match() to walk the struct at the 0th offset, and 10455 * resolve types. 
10456 */ 10457 if (is_kfunc_acquire(meta) || 10458 (is_kfunc_release(meta) && reg->ref_obj_id) || 10459 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) 10460 strict_type_match = true; 10461 10462 WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off); 10463 10464 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, ®_ref_id); 10465 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); 10466 if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) { 10467 verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n", 10468 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, 10469 btf_type_str(reg_ref_t), reg_ref_tname); 10470 return -EINVAL; 10471 } 10472 return 0; 10473 } 10474 10475 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 10476 { 10477 struct bpf_verifier_state *state = env->cur_state; 10478 struct btf_record *rec = reg_btf_record(reg); 10479 10480 if (!state->active_lock.ptr) { 10481 verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); 10482 return -EFAULT; 10483 } 10484 10485 if (type_flag(reg->type) & NON_OWN_REF) { 10486 verbose(env, "verifier internal error: NON_OWN_REF already set\n"); 10487 return -EFAULT; 10488 } 10489 10490 reg->type |= NON_OWN_REF; 10491 if (rec->refcount_off >= 0) 10492 reg->type |= MEM_RCU; 10493 10494 return 0; 10495 } 10496 10497 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id) 10498 { 10499 struct bpf_func_state *state, *unused; 10500 struct bpf_reg_state *reg; 10501 int i; 10502 10503 state = cur_func(env); 10504 10505 if (!ref_obj_id) { 10506 verbose(env, "verifier internal error: ref_obj_id is zero for " 10507 "owning -> non-owning conversion\n"); 10508 return -EFAULT; 10509 } 10510 10511 for (i = 0; i < state->acquired_refs; i++) { 10512 if (state->refs[i].id != ref_obj_id) 10513 continue; 10514 10515 /* Clear ref_obj_id here so release_reference doesn't clobber 10516 * the whole reg 10517 */ 10518 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ 10519 if (reg->ref_obj_id == ref_obj_id) { 10520 reg->ref_obj_id = 0; 10521 ref_set_non_owning(env, reg); 10522 } 10523 })); 10524 return 0; 10525 } 10526 10527 verbose(env, "verifier internal error: ref state missing for ref_obj_id\n"); 10528 return -EFAULT; 10529 } 10530 10531 /* Implementation details: 10532 * 10533 * Each register points to some region of memory, which we define as an 10534 * allocation. Each allocation may embed a bpf_spin_lock which protects any 10535 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same 10536 * allocation. The lock and the data it protects are colocated in the same 10537 * memory region. 10538 * 10539 * Hence, everytime a register holds a pointer value pointing to such 10540 * allocation, the verifier preserves a unique reg->id for it. 10541 * 10542 * The verifier remembers the lock 'ptr' and the lock 'id' whenever 10543 * bpf_spin_lock is called. 10544 * 10545 * To enable this, lock state in the verifier captures two values: 10546 * active_lock.ptr = Register's type specific pointer 10547 * active_lock.id = A unique ID for each register pointer value 10548 * 10549 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two 10550 * supported register types. 
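 *
 * (Illustrative sketch) a program doing
 *   bpf_spin_lock(&a->lock);
 *   bpf_list_push_front(&b->head, &n->node);
 * is rejected when 'b' comes from a different allocation than 'a' (their
 * active_lock.ptr/id would not match), see check_reg_allocation_locked()
 * below.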
10551 * 10552 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of 10553 * allocated objects is the reg->btf pointer. 10554 * 10555 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we 10556 * can establish the provenance of the map value statically for each distinct 10557 * lookup into such maps. They always contain a single map value hence unique 10558 * IDs for each pseudo load pessimizes the algorithm and rejects valid programs. 10559 * 10560 * So, in case of global variables, they use array maps with max_entries = 1, 10561 * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all point 10562 * into the same map value as max_entries is 1, as described above). 10563 * 10564 * In case of inner map lookups, the inner map pointer has same map_ptr as the 10565 * outer map pointer (in verifier context), but each lookup into an inner map 10566 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner 10567 * maps from the same outer map share the same map_ptr as active_lock.ptr, they 10568 * will get different reg->id assigned to each lookup, hence different 10569 * active_lock.id. 10570 * 10571 * In case of allocated objects, active_lock.ptr is the reg->btf, and the 10572 * reg->id is a unique ID preserved after the NULL pointer check on the pointer 10573 * returned from bpf_obj_new. Each allocation receives a new reg->id. 10574 */ 10575 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 10576 { 10577 void *ptr; 10578 u32 id; 10579 10580 switch ((int)reg->type) { 10581 case PTR_TO_MAP_VALUE: 10582 ptr = reg->map_ptr; 10583 break; 10584 case PTR_TO_BTF_ID | MEM_ALLOC: 10585 ptr = reg->btf; 10586 break; 10587 default: 10588 verbose(env, "verifier internal error: unknown reg type for lock check\n"); 10589 return -EFAULT; 10590 } 10591 id = reg->id; 10592 10593 if (!env->cur_state->active_lock.ptr) 10594 return -EINVAL; 10595 if (env->cur_state->active_lock.ptr != ptr || 10596 env->cur_state->active_lock.id != id) { 10597 verbose(env, "held lock and object are not in the same allocation\n"); 10598 return -EINVAL; 10599 } 10600 return 0; 10601 } 10602 10603 static bool is_bpf_list_api_kfunc(u32 btf_id) 10604 { 10605 return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 10606 btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 10607 btf_id == special_kfunc_list[KF_bpf_list_pop_front] || 10608 btf_id == special_kfunc_list[KF_bpf_list_pop_back]; 10609 } 10610 10611 static bool is_bpf_rbtree_api_kfunc(u32 btf_id) 10612 { 10613 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || 10614 btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || 10615 btf_id == special_kfunc_list[KF_bpf_rbtree_first]; 10616 } 10617 10618 static bool is_bpf_graph_api_kfunc(u32 btf_id) 10619 { 10620 return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) || 10621 btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; 10622 } 10623 10624 static bool is_callback_calling_kfunc(u32 btf_id) 10625 { 10626 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; 10627 } 10628 10629 static bool is_rbtree_lock_required_kfunc(u32 btf_id) 10630 { 10631 return is_bpf_rbtree_api_kfunc(btf_id); 10632 } 10633 10634 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env, 10635 enum btf_field_type head_field_type, 10636 u32 kfunc_btf_id) 10637 { 10638 bool ret; 10639 10640 switch (head_field_type) { 10641 case BPF_LIST_HEAD: 10642 ret = 
is_bpf_list_api_kfunc(kfunc_btf_id); 10643 break; 10644 case BPF_RB_ROOT: 10645 ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id); 10646 break; 10647 default: 10648 verbose(env, "verifier internal error: unexpected graph root argument type %s\n", 10649 btf_field_type_name(head_field_type)); 10650 return false; 10651 } 10652 10653 if (!ret) 10654 verbose(env, "verifier internal error: %s head arg for unknown kfunc\n", 10655 btf_field_type_name(head_field_type)); 10656 return ret; 10657 } 10658 10659 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, 10660 enum btf_field_type node_field_type, 10661 u32 kfunc_btf_id) 10662 { 10663 bool ret; 10664 10665 switch (node_field_type) { 10666 case BPF_LIST_NODE: 10667 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 10668 kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]); 10669 break; 10670 case BPF_RB_NODE: 10671 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || 10672 kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]); 10673 break; 10674 default: 10675 verbose(env, "verifier internal error: unexpected graph node argument type %s\n", 10676 btf_field_type_name(node_field_type)); 10677 return false; 10678 } 10679 10680 if (!ret) 10681 verbose(env, "verifier internal error: %s node arg for unknown kfunc\n", 10682 btf_field_type_name(node_field_type)); 10683 return ret; 10684 } 10685 10686 static int 10687 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env, 10688 struct bpf_reg_state *reg, u32 regno, 10689 struct bpf_kfunc_call_arg_meta *meta, 10690 enum btf_field_type head_field_type, 10691 struct btf_field **head_field) 10692 { 10693 const char *head_type_name; 10694 struct btf_field *field; 10695 struct btf_record *rec; 10696 u32 head_off; 10697 10698 if (meta->btf != btf_vmlinux) { 10699 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); 10700 return -EFAULT; 10701 } 10702 10703 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) 10704 return -EFAULT; 10705 10706 head_type_name = btf_field_type_name(head_field_type); 10707 if (!tnum_is_const(reg->var_off)) { 10708 verbose(env, 10709 "R%d doesn't have constant offset. 
%s has to be at the constant offset\n", 10710 regno, head_type_name); 10711 return -EINVAL; 10712 } 10713 10714 rec = reg_btf_record(reg); 10715 head_off = reg->off + reg->var_off.value; 10716 field = btf_record_find(rec, head_off, head_field_type); 10717 if (!field) { 10718 verbose(env, "%s not found at offset=%u\n", head_type_name, head_off); 10719 return -EINVAL; 10720 } 10721 10722 /* All functions require bpf_list_head to be protected using a bpf_spin_lock */ 10723 if (check_reg_allocation_locked(env, reg)) { 10724 verbose(env, "bpf_spin_lock at off=%d must be held for %s\n", 10725 rec->spin_lock_off, head_type_name); 10726 return -EINVAL; 10727 } 10728 10729 if (*head_field) { 10730 verbose(env, "verifier internal error: repeating %s arg\n", head_type_name); 10731 return -EFAULT; 10732 } 10733 *head_field = field; 10734 return 0; 10735 } 10736 10737 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env, 10738 struct bpf_reg_state *reg, u32 regno, 10739 struct bpf_kfunc_call_arg_meta *meta) 10740 { 10741 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD, 10742 &meta->arg_list_head.field); 10743 } 10744 10745 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env, 10746 struct bpf_reg_state *reg, u32 regno, 10747 struct bpf_kfunc_call_arg_meta *meta) 10748 { 10749 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT, 10750 &meta->arg_rbtree_root.field); 10751 } 10752 10753 static int 10754 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env, 10755 struct bpf_reg_state *reg, u32 regno, 10756 struct bpf_kfunc_call_arg_meta *meta, 10757 enum btf_field_type head_field_type, 10758 enum btf_field_type node_field_type, 10759 struct btf_field **node_field) 10760 { 10761 const char *node_type_name; 10762 const struct btf_type *et, *t; 10763 struct btf_field *field; 10764 u32 node_off; 10765 10766 if (meta->btf != btf_vmlinux) { 10767 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); 10768 return -EFAULT; 10769 } 10770 10771 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) 10772 return -EFAULT; 10773 10774 node_type_name = btf_field_type_name(node_field_type); 10775 if (!tnum_is_const(reg->var_off)) { 10776 verbose(env, 10777 "R%d doesn't have constant offset. 
%s has to be at the constant offset\n", 10778 regno, node_type_name); 10779 return -EINVAL; 10780 } 10781 10782 node_off = reg->off + reg->var_off.value; 10783 field = reg_find_field_offset(reg, node_off, node_field_type); 10784 if (!field || field->offset != node_off) { 10785 verbose(env, "%s not found at offset=%u\n", node_type_name, node_off); 10786 return -EINVAL; 10787 } 10788 10789 field = *node_field; 10790 10791 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); 10792 t = btf_type_by_id(reg->btf, reg->btf_id); 10793 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, 10794 field->graph_root.value_btf_id, true)) { 10795 verbose(env, "operation on %s expects arg#1 %s at offset=%d " 10796 "in struct %s, but arg is at offset=%d in struct %s\n", 10797 btf_field_type_name(head_field_type), 10798 btf_field_type_name(node_field_type), 10799 field->graph_root.node_offset, 10800 btf_name_by_offset(field->graph_root.btf, et->name_off), 10801 node_off, btf_name_by_offset(reg->btf, t->name_off)); 10802 return -EINVAL; 10803 } 10804 meta->arg_btf = reg->btf; 10805 meta->arg_btf_id = reg->btf_id; 10806 10807 if (node_off != field->graph_root.node_offset) { 10808 verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n", 10809 node_off, btf_field_type_name(node_field_type), 10810 field->graph_root.node_offset, 10811 btf_name_by_offset(field->graph_root.btf, et->name_off)); 10812 return -EINVAL; 10813 } 10814 10815 return 0; 10816 } 10817 10818 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, 10819 struct bpf_reg_state *reg, u32 regno, 10820 struct bpf_kfunc_call_arg_meta *meta) 10821 { 10822 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, 10823 BPF_LIST_HEAD, BPF_LIST_NODE, 10824 &meta->arg_list_head.field); 10825 } 10826 10827 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env, 10828 struct bpf_reg_state *reg, u32 regno, 10829 struct bpf_kfunc_call_arg_meta *meta) 10830 { 10831 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, 10832 BPF_RB_ROOT, BPF_RB_NODE, 10833 &meta->arg_rbtree_root.field); 10834 } 10835 10836 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, 10837 int insn_idx) 10838 { 10839 const char *func_name = meta->func_name, *ref_tname; 10840 const struct btf *btf = meta->btf; 10841 const struct btf_param *args; 10842 struct btf_record *rec; 10843 u32 i, nargs; 10844 int ret; 10845 10846 args = (const struct btf_param *)(meta->func_proto + 1); 10847 nargs = btf_type_vlen(meta->func_proto); 10848 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 10849 verbose(env, "Function %s has %d > %d args\n", func_name, nargs, 10850 MAX_BPF_FUNC_REG_ARGS); 10851 return -EINVAL; 10852 } 10853 10854 /* Check that BTF function arguments match actual types that the 10855 * verifier sees. 
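 *
 * E.g. (purely illustrative prototype, not a real kfunc) for
 *
 *     void bpf_foo(void *mem, u32 mem__sz, u64 flags);
 *
 * 'mem' followed by 'mem__sz' is recognized as a memory + size pair, so the
 * verifier checks that the register passed for 'mem' points to a valid region
 * of at least 'mem__sz' bytes, while 'flags' only has to be a SCALAR_VALUE.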
10856 */ 10857 for (i = 0; i < nargs; i++) { 10858 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[i + 1]; 10859 const struct btf_type *t, *ref_t, *resolve_ret; 10860 enum bpf_arg_type arg_type = ARG_DONTCARE; 10861 u32 regno = i + 1, ref_id, type_size; 10862 bool is_ret_buf_sz = false; 10863 int kf_arg_type; 10864 10865 t = btf_type_skip_modifiers(btf, args[i].type, NULL); 10866 10867 if (is_kfunc_arg_ignore(btf, &args[i])) 10868 continue; 10869 10870 if (btf_type_is_scalar(t)) { 10871 if (reg->type != SCALAR_VALUE) { 10872 verbose(env, "R%d is not a scalar\n", regno); 10873 return -EINVAL; 10874 } 10875 10876 if (is_kfunc_arg_constant(meta->btf, &args[i])) { 10877 if (meta->arg_constant.found) { 10878 verbose(env, "verifier internal error: only one constant argument permitted\n"); 10879 return -EFAULT; 10880 } 10881 if (!tnum_is_const(reg->var_off)) { 10882 verbose(env, "R%d must be a known constant\n", regno); 10883 return -EINVAL; 10884 } 10885 ret = mark_chain_precision(env, regno); 10886 if (ret < 0) 10887 return ret; 10888 meta->arg_constant.found = true; 10889 meta->arg_constant.value = reg->var_off.value; 10890 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) { 10891 meta->r0_rdonly = true; 10892 is_ret_buf_sz = true; 10893 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) { 10894 is_ret_buf_sz = true; 10895 } 10896 10897 if (is_ret_buf_sz) { 10898 if (meta->r0_size) { 10899 verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc"); 10900 return -EINVAL; 10901 } 10902 10903 if (!tnum_is_const(reg->var_off)) { 10904 verbose(env, "R%d is not a const\n", regno); 10905 return -EINVAL; 10906 } 10907 10908 meta->r0_size = reg->var_off.value; 10909 ret = mark_chain_precision(env, regno); 10910 if (ret) 10911 return ret; 10912 } 10913 continue; 10914 } 10915 10916 if (!btf_type_is_ptr(t)) { 10917 verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t)); 10918 return -EINVAL; 10919 } 10920 10921 if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) && 10922 (register_is_null(reg) || type_may_be_null(reg->type))) { 10923 verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); 10924 return -EACCES; 10925 } 10926 10927 if (reg->ref_obj_id) { 10928 if (is_kfunc_release(meta) && meta->ref_obj_id) { 10929 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 10930 regno, reg->ref_obj_id, 10931 meta->ref_obj_id); 10932 return -EFAULT; 10933 } 10934 meta->ref_obj_id = reg->ref_obj_id; 10935 if (is_kfunc_release(meta)) 10936 meta->release_regno = regno; 10937 } 10938 10939 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); 10940 ref_tname = btf_name_by_offset(btf, ref_t->name_off); 10941 10942 kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs); 10943 if (kf_arg_type < 0) 10944 return kf_arg_type; 10945 10946 switch (kf_arg_type) { 10947 case KF_ARG_PTR_TO_ALLOC_BTF_ID: 10948 case KF_ARG_PTR_TO_BTF_ID: 10949 if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta)) 10950 break; 10951 10952 if (!is_trusted_reg(reg)) { 10953 if (!is_kfunc_rcu(meta)) { 10954 verbose(env, "R%d must be referenced or trusted\n", regno); 10955 return -EINVAL; 10956 } 10957 if (!is_rcu_reg(reg)) { 10958 verbose(env, "R%d must be a rcu pointer\n", regno); 10959 return -EINVAL; 10960 } 10961 } 10962 10963 fallthrough; 10964 case KF_ARG_PTR_TO_CTX: 10965 /* Trusted arguments have the same offset checks as release arguments */ 10966 arg_type |= OBJ_RELEASE; 10967 break; 
10968 case KF_ARG_PTR_TO_DYNPTR: 10969 case KF_ARG_PTR_TO_ITER: 10970 case KF_ARG_PTR_TO_LIST_HEAD: 10971 case KF_ARG_PTR_TO_LIST_NODE: 10972 case KF_ARG_PTR_TO_RB_ROOT: 10973 case KF_ARG_PTR_TO_RB_NODE: 10974 case KF_ARG_PTR_TO_MEM: 10975 case KF_ARG_PTR_TO_MEM_SIZE: 10976 case KF_ARG_PTR_TO_CALLBACK: 10977 case KF_ARG_PTR_TO_REFCOUNTED_KPTR: 10978 /* Trusted by default */ 10979 break; 10980 default: 10981 WARN_ON_ONCE(1); 10982 return -EFAULT; 10983 } 10984 10985 if (is_kfunc_release(meta) && reg->ref_obj_id) 10986 arg_type |= OBJ_RELEASE; 10987 ret = check_func_arg_reg_off(env, reg, regno, arg_type); 10988 if (ret < 0) 10989 return ret; 10990 10991 switch (kf_arg_type) { 10992 case KF_ARG_PTR_TO_CTX: 10993 if (reg->type != PTR_TO_CTX) { 10994 verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t)); 10995 return -EINVAL; 10996 } 10997 10998 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { 10999 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); 11000 if (ret < 0) 11001 return -EINVAL; 11002 meta->ret_btf_id = ret; 11003 } 11004 break; 11005 case KF_ARG_PTR_TO_ALLOC_BTF_ID: 11006 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11007 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11008 return -EINVAL; 11009 } 11010 if (!reg->ref_obj_id) { 11011 verbose(env, "allocated object must be referenced\n"); 11012 return -EINVAL; 11013 } 11014 if (meta->btf == btf_vmlinux && 11015 meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) { 11016 meta->arg_btf = reg->btf; 11017 meta->arg_btf_id = reg->btf_id; 11018 } 11019 break; 11020 case KF_ARG_PTR_TO_DYNPTR: 11021 { 11022 enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; 11023 int clone_ref_obj_id = 0; 11024 11025 if (reg->type != PTR_TO_STACK && 11026 reg->type != CONST_PTR_TO_DYNPTR) { 11027 verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i); 11028 return -EINVAL; 11029 } 11030 11031 if (reg->type == CONST_PTR_TO_DYNPTR) 11032 dynptr_arg_type |= MEM_RDONLY; 11033 11034 if (is_kfunc_arg_uninit(btf, &args[i])) 11035 dynptr_arg_type |= MEM_UNINIT; 11036 11037 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { 11038 dynptr_arg_type |= DYNPTR_TYPE_SKB; 11039 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { 11040 dynptr_arg_type |= DYNPTR_TYPE_XDP; 11041 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && 11042 (dynptr_arg_type & MEM_UNINIT)) { 11043 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; 11044 11045 if (parent_type == BPF_DYNPTR_TYPE_INVALID) { 11046 verbose(env, "verifier internal error: no dynptr type for parent of clone\n"); 11047 return -EFAULT; 11048 } 11049 11050 dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type); 11051 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; 11052 if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) { 11053 verbose(env, "verifier internal error: missing ref obj id for parent of clone\n"); 11054 return -EFAULT; 11055 } 11056 } 11057 11058 ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id); 11059 if (ret < 0) 11060 return ret; 11061 11062 if (!(dynptr_arg_type & MEM_UNINIT)) { 11063 int id = dynptr_id(env, reg); 11064 11065 if (id < 0) { 11066 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); 11067 return id; 11068 } 11069 meta->initialized_dynptr.id = id; 11070 meta->initialized_dynptr.type = dynptr_get_type(env, reg); 11071 
meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); 11072 } 11073 11074 break; 11075 } 11076 case KF_ARG_PTR_TO_ITER: 11077 ret = process_iter_arg(env, regno, insn_idx, meta); 11078 if (ret < 0) 11079 return ret; 11080 break; 11081 case KF_ARG_PTR_TO_LIST_HEAD: 11082 if (reg->type != PTR_TO_MAP_VALUE && 11083 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11084 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); 11085 return -EINVAL; 11086 } 11087 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { 11088 verbose(env, "allocated object must be referenced\n"); 11089 return -EINVAL; 11090 } 11091 ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta); 11092 if (ret < 0) 11093 return ret; 11094 break; 11095 case KF_ARG_PTR_TO_RB_ROOT: 11096 if (reg->type != PTR_TO_MAP_VALUE && 11097 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11098 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); 11099 return -EINVAL; 11100 } 11101 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { 11102 verbose(env, "allocated object must be referenced\n"); 11103 return -EINVAL; 11104 } 11105 ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta); 11106 if (ret < 0) 11107 return ret; 11108 break; 11109 case KF_ARG_PTR_TO_LIST_NODE: 11110 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11111 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11112 return -EINVAL; 11113 } 11114 if (!reg->ref_obj_id) { 11115 verbose(env, "allocated object must be referenced\n"); 11116 return -EINVAL; 11117 } 11118 ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta); 11119 if (ret < 0) 11120 return ret; 11121 break; 11122 case KF_ARG_PTR_TO_RB_NODE: 11123 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) { 11124 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) { 11125 verbose(env, "rbtree_remove node input must be non-owning ref\n"); 11126 return -EINVAL; 11127 } 11128 if (in_rbtree_lock_required_cb(env)) { 11129 verbose(env, "rbtree_remove not allowed in rbtree cb\n"); 11130 return -EINVAL; 11131 } 11132 } else { 11133 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11134 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11135 return -EINVAL; 11136 } 11137 if (!reg->ref_obj_id) { 11138 verbose(env, "allocated object must be referenced\n"); 11139 return -EINVAL; 11140 } 11141 } 11142 11143 ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta); 11144 if (ret < 0) 11145 return ret; 11146 break; 11147 case KF_ARG_PTR_TO_BTF_ID: 11148 /* Only base_type is checked, further checks are done here */ 11149 if ((base_type(reg->type) != PTR_TO_BTF_ID || 11150 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && 11151 !reg2btf_ids[base_type(reg->type)]) { 11152 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); 11153 verbose(env, "expected %s or socket\n", 11154 reg_type_str(env, base_type(reg->type) | 11155 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); 11156 return -EINVAL; 11157 } 11158 ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i); 11159 if (ret < 0) 11160 return ret; 11161 break; 11162 case KF_ARG_PTR_TO_MEM: 11163 resolve_ret = btf_resolve_size(btf, ref_t, &type_size); 11164 if (IS_ERR(resolve_ret)) { 11165 verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n", 11166 i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret)); 11167 return -EINVAL; 11168 } 11169 ret = 
check_mem_reg(env, reg, regno, type_size); 11170 if (ret < 0) 11171 return ret; 11172 break; 11173 case KF_ARG_PTR_TO_MEM_SIZE: 11174 { 11175 struct bpf_reg_state *buff_reg = &regs[regno]; 11176 const struct btf_param *buff_arg = &args[i]; 11177 struct bpf_reg_state *size_reg = &regs[regno + 1]; 11178 const struct btf_param *size_arg = &args[i + 1]; 11179 11180 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { 11181 ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1); 11182 if (ret < 0) { 11183 verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1); 11184 return ret; 11185 } 11186 } 11187 11188 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { 11189 if (meta->arg_constant.found) { 11190 verbose(env, "verifier internal error: only one constant argument permitted\n"); 11191 return -EFAULT; 11192 } 11193 if (!tnum_is_const(size_reg->var_off)) { 11194 verbose(env, "R%d must be a known constant\n", regno + 1); 11195 return -EINVAL; 11196 } 11197 meta->arg_constant.found = true; 11198 meta->arg_constant.value = size_reg->var_off.value; 11199 } 11200 11201 /* Skip next '__sz' or '__szk' argument */ 11202 i++; 11203 break; 11204 } 11205 case KF_ARG_PTR_TO_CALLBACK: 11206 if (reg->type != PTR_TO_FUNC) { 11207 verbose(env, "arg%d expected pointer to func\n", i); 11208 return -EINVAL; 11209 } 11210 meta->subprogno = reg->subprogno; 11211 break; 11212 case KF_ARG_PTR_TO_REFCOUNTED_KPTR: 11213 if (!type_is_ptr_alloc_obj(reg->type)) { 11214 verbose(env, "arg#%d is neither owning nor non-owning ref\n", i); 11215 return -EINVAL; 11216 } 11217 if (!type_is_non_owning_ref(reg->type)) 11218 meta->arg_owning_ref = true; 11219 11220 rec = reg_btf_record(reg); 11221 if (!rec) { 11222 verbose(env, "verifier internal error: Couldn't find btf_record\n"); 11223 return -EFAULT; 11224 } 11225 11226 if (rec->refcount_off < 0) { 11227 verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); 11228 return -EINVAL; 11229 } 11230 11231 meta->arg_btf = reg->btf; 11232 meta->arg_btf_id = reg->btf_id; 11233 break; 11234 } 11235 } 11236 11237 if (is_kfunc_release(meta) && !meta->release_regno) { 11238 verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", 11239 func_name); 11240 return -EINVAL; 11241 } 11242 11243 return 0; 11244 } 11245 11246 static int fetch_kfunc_meta(struct bpf_verifier_env *env, 11247 struct bpf_insn *insn, 11248 struct bpf_kfunc_call_arg_meta *meta, 11249 const char **kfunc_name) 11250 { 11251 const struct btf_type *func, *func_proto; 11252 u32 func_id, *kfunc_flags; 11253 const char *func_name; 11254 struct btf *desc_btf; 11255 11256 if (kfunc_name) 11257 *kfunc_name = NULL; 11258 11259 if (!insn->imm) 11260 return -EINVAL; 11261 11262 desc_btf = find_kfunc_desc_btf(env, insn->off); 11263 if (IS_ERR(desc_btf)) 11264 return PTR_ERR(desc_btf); 11265 11266 func_id = insn->imm; 11267 func = btf_type_by_id(desc_btf, func_id); 11268 func_name = btf_name_by_offset(desc_btf, func->name_off); 11269 if (kfunc_name) 11270 *kfunc_name = func_name; 11271 func_proto = btf_type_by_id(desc_btf, func->type); 11272 11273 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); 11274 if (!kfunc_flags) { 11275 return -EACCES; 11276 } 11277 11278 memset(meta, 0, sizeof(*meta)); 11279 meta->btf = desc_btf; 11280 meta->func_id = func_id; 11281 meta->kfunc_flags = *kfunc_flags; 11282 meta->func_proto = func_proto; 11283 meta->func_name = func_name; 11284 11285 return 0; 11286 } 11287 11288
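/* A kfunc call arrives here as a BPF_CALL instruction with
 * src_reg == BPF_PSEUDO_KFUNC_CALL. A sketch, for illustration only
 * ('kfunc_btf_id' stands in for the target function's BTF id):
 *
 *     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),  // arg#0
 *     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, kfunc_btf_id),
 *
 * fetch_kfunc_meta() above resolves insn->imm and insn->off into the kernel
 * function's BTF prototype and kfunc flags; check_kfunc_call() below then
 * validates the arguments, models reference/RCU/lock side effects and derives
 * the type of R0 from the declared return type.
 */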
static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 11289 int *insn_idx_p) 11290 { 11291 const struct btf_type *t, *ptr_type; 11292 u32 i, nargs, ptr_type_id, release_ref_obj_id; 11293 struct bpf_reg_state *regs = cur_regs(env); 11294 const char *func_name, *ptr_type_name; 11295 bool sleepable, rcu_lock, rcu_unlock; 11296 struct bpf_kfunc_call_arg_meta meta; 11297 struct bpf_insn_aux_data *insn_aux; 11298 int err, insn_idx = *insn_idx_p; 11299 const struct btf_param *args; 11300 const struct btf_type *ret_t; 11301 struct btf *desc_btf; 11302 11303 /* skip for now, but return error when we find this in fixup_kfunc_call */ 11304 if (!insn->imm) 11305 return 0; 11306 11307 err = fetch_kfunc_meta(env, insn, &meta, &func_name); 11308 if (err == -EACCES && func_name) 11309 verbose(env, "calling kernel function %s is not allowed\n", func_name); 11310 if (err) 11311 return err; 11312 desc_btf = meta.btf; 11313 insn_aux = &env->insn_aux_data[insn_idx]; 11314 11315 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); 11316 11317 if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) { 11318 verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n"); 11319 return -EACCES; 11320 } 11321 11322 sleepable = is_kfunc_sleepable(&meta); 11323 if (sleepable && !env->prog->aux->sleepable) { 11324 verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); 11325 return -EACCES; 11326 } 11327 11328 rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); 11329 rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); 11330 11331 if (env->cur_state->active_rcu_lock) { 11332 struct bpf_func_state *state; 11333 struct bpf_reg_state *reg; 11334 11335 if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { 11336 verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); 11337 return -EACCES; 11338 } 11339 11340 if (rcu_lock) { 11341 verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); 11342 return -EINVAL; 11343 } else if (rcu_unlock) { 11344 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 11345 if (reg->type & MEM_RCU) { 11346 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); 11347 reg->type |= PTR_UNTRUSTED; 11348 } 11349 })); 11350 env->cur_state->active_rcu_lock = false; 11351 } else if (sleepable) { 11352 verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name); 11353 return -EACCES; 11354 } 11355 } else if (rcu_lock) { 11356 env->cur_state->active_rcu_lock = true; 11357 } else if (rcu_unlock) { 11358 verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name); 11359 return -EINVAL; 11360 } 11361 11362 /* Check the arguments */ 11363 err = check_kfunc_args(env, &meta, insn_idx); 11364 if (err < 0) 11365 return err; 11366 /* In case of release function, we get register number of refcounted 11367 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now. 
11368 */ 11369 if (meta.release_regno) { 11370 err = release_reference(env, regs[meta.release_regno].ref_obj_id); 11371 if (err) { 11372 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 11373 func_name, meta.func_id); 11374 return err; 11375 } 11376 } 11377 11378 if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11379 meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 11380 meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 11381 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; 11382 insn_aux->insert_off = regs[BPF_REG_2].off; 11383 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); 11384 err = ref_convert_owning_non_owning(env, release_ref_obj_id); 11385 if (err) { 11386 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", 11387 func_name, meta.func_id); 11388 return err; 11389 } 11390 11391 err = release_reference(env, release_ref_obj_id); 11392 if (err) { 11393 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 11394 func_name, meta.func_id); 11395 return err; 11396 } 11397 } 11398 11399 if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 11400 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 11401 set_rbtree_add_callback_state); 11402 if (err) { 11403 verbose(env, "kfunc %s#%d failed callback verification\n", 11404 func_name, meta.func_id); 11405 return err; 11406 } 11407 } 11408 11409 for (i = 0; i < CALLER_SAVED_REGS; i++) 11410 mark_reg_not_init(env, regs, caller_saved[i]); 11411 11412 /* Check return type */ 11413 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); 11414 11415 if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) { 11416 /* Only exception is bpf_obj_new_impl */ 11417 if (meta.btf != btf_vmlinux || 11418 (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] && 11419 meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) { 11420 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); 11421 return -EINVAL; 11422 } 11423 } 11424 11425 if (btf_type_is_scalar(t)) { 11426 mark_reg_unknown(env, regs, BPF_REG_0); 11427 mark_btf_func_reg_size(env, BPF_REG_0, t->size); 11428 } else if (btf_type_is_ptr(t)) { 11429 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); 11430 11431 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { 11432 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) { 11433 struct btf *ret_btf; 11434 u32 ret_btf_id; 11435 11436 if (unlikely(!bpf_global_ma_set)) 11437 return -ENOMEM; 11438 11439 if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) { 11440 verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); 11441 return -EINVAL; 11442 } 11443 11444 ret_btf = env->prog->aux->btf; 11445 ret_btf_id = meta.arg_constant.value; 11446 11447 /* This may be NULL due to user not supplying a BTF */ 11448 if (!ret_btf) { 11449 verbose(env, "bpf_obj_new requires prog BTF\n"); 11450 return -EINVAL; 11451 } 11452 11453 ret_t = btf_type_by_id(ret_btf, ret_btf_id); 11454 if (!ret_t || !__btf_type_is_struct(ret_t)) { 11455 verbose(env, "bpf_obj_new type ID argument must be of a struct\n"); 11456 return -EINVAL; 11457 } 11458 11459 mark_reg_known_zero(env, regs, BPF_REG_0); 11460 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; 11461 regs[BPF_REG_0].btf = ret_btf; 11462 regs[BPF_REG_0].btf_id = ret_btf_id; 11463 11464 insn_aux->obj_new_size = ret_t->size; 
11465 insn_aux->kptr_struct_meta = 11466 btf_find_struct_meta(ret_btf, ret_btf_id); 11467 } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { 11468 mark_reg_known_zero(env, regs, BPF_REG_0); 11469 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; 11470 regs[BPF_REG_0].btf = meta.arg_btf; 11471 regs[BPF_REG_0].btf_id = meta.arg_btf_id; 11472 11473 insn_aux->kptr_struct_meta = 11474 btf_find_struct_meta(meta.arg_btf, 11475 meta.arg_btf_id); 11476 } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] || 11477 meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) { 11478 struct btf_field *field = meta.arg_list_head.field; 11479 11480 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); 11481 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] || 11482 meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { 11483 struct btf_field *field = meta.arg_rbtree_root.field; 11484 11485 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); 11486 } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { 11487 mark_reg_known_zero(env, regs, BPF_REG_0); 11488 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; 11489 regs[BPF_REG_0].btf = desc_btf; 11490 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 11491 } else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { 11492 ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value); 11493 if (!ret_t || !btf_type_is_struct(ret_t)) { 11494 verbose(env, 11495 "kfunc bpf_rdonly_cast type ID argument must be of a struct\n"); 11496 return -EINVAL; 11497 } 11498 11499 mark_reg_known_zero(env, regs, BPF_REG_0); 11500 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; 11501 regs[BPF_REG_0].btf = desc_btf; 11502 regs[BPF_REG_0].btf_id = meta.arg_constant.value; 11503 } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] || 11504 meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { 11505 enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type); 11506 11507 mark_reg_known_zero(env, regs, BPF_REG_0); 11508 11509 if (!meta.arg_constant.found) { 11510 verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); 11511 return -EFAULT; 11512 } 11513 11514 regs[BPF_REG_0].mem_size = meta.arg_constant.value; 11515 11516 /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */ 11517 regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; 11518 11519 if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { 11520 regs[BPF_REG_0].type |= MEM_RDONLY; 11521 } else { 11522 /* this will set env->seen_direct_write to true */ 11523 if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { 11524 verbose(env, "the prog does not allow writes to packet data\n"); 11525 return -EINVAL; 11526 } 11527 } 11528 11529 if (!meta.initialized_dynptr.id) { 11530 verbose(env, "verifier internal error: no dynptr id\n"); 11531 return -EFAULT; 11532 } 11533 regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id; 11534 11535 /* we don't need to set BPF_REG_0's ref obj id 11536 * because packet slices are not refcounted (see 11537 * dynptr_type_refcounted) 11538 */ 11539 } else { 11540 verbose(env, "kernel function %s unhandled dynamic return type\n", 11541 meta.func_name); 11542 return -EFAULT; 11543 } 11544 } else if (!__btf_type_is_struct(ptr_type)) { 11545 if (!meta.r0_size) { 11546 __u32 sz; 11547 11548 if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) { 11549 meta.r0_size = sz; 11550 meta.r0_rdonly = true; 11551 } 11552 } 
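/* If the size still could not be resolved, this pointer return type cannot
 * be described to the program and the call is rejected below.
 */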
11553 if (!meta.r0_size) { 11554 ptr_type_name = btf_name_by_offset(desc_btf, 11555 ptr_type->name_off); 11556 verbose(env, 11557 "kernel function %s returns pointer type %s %s is not supported\n", 11558 func_name, 11559 btf_type_str(ptr_type), 11560 ptr_type_name); 11561 return -EINVAL; 11562 } 11563 11564 mark_reg_known_zero(env, regs, BPF_REG_0); 11565 regs[BPF_REG_0].type = PTR_TO_MEM; 11566 regs[BPF_REG_0].mem_size = meta.r0_size; 11567 11568 if (meta.r0_rdonly) 11569 regs[BPF_REG_0].type |= MEM_RDONLY; 11570 11571 /* Ensures we don't access the memory after a release_reference() */ 11572 if (meta.ref_obj_id) 11573 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 11574 } else { 11575 mark_reg_known_zero(env, regs, BPF_REG_0); 11576 regs[BPF_REG_0].btf = desc_btf; 11577 regs[BPF_REG_0].type = PTR_TO_BTF_ID; 11578 regs[BPF_REG_0].btf_id = ptr_type_id; 11579 } 11580 11581 if (is_kfunc_ret_null(&meta)) { 11582 regs[BPF_REG_0].type |= PTR_MAYBE_NULL; 11583 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */ 11584 regs[BPF_REG_0].id = ++env->id_gen; 11585 } 11586 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); 11587 if (is_kfunc_acquire(&meta)) { 11588 int id = acquire_reference_state(env, insn_idx); 11589 11590 if (id < 0) 11591 return id; 11592 if (is_kfunc_ret_null(&meta)) 11593 regs[BPF_REG_0].id = id; 11594 regs[BPF_REG_0].ref_obj_id = id; 11595 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { 11596 ref_set_non_owning(env, &regs[BPF_REG_0]); 11597 } 11598 11599 if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id) 11600 regs[BPF_REG_0].id = ++env->id_gen; 11601 } else if (btf_type_is_void(t)) { 11602 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { 11603 if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) { 11604 insn_aux->kptr_struct_meta = 11605 btf_find_struct_meta(meta.arg_btf, 11606 meta.arg_btf_id); 11607 } 11608 } 11609 } 11610 11611 nargs = btf_type_vlen(meta.func_proto); 11612 args = (const struct btf_param *)(meta.func_proto + 1); 11613 for (i = 0; i < nargs; i++) { 11614 u32 regno = i + 1; 11615 11616 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); 11617 if (btf_type_is_ptr(t)) 11618 mark_btf_func_reg_size(env, regno, sizeof(void *)); 11619 else 11620 /* scalar.
ensured by btf_check_kfunc_arg_match() */ 11621 mark_btf_func_reg_size(env, regno, t->size); 11622 } 11623 11624 if (is_iter_next_kfunc(&meta)) { 11625 err = process_iter_next_call(env, insn_idx, &meta); 11626 if (err) 11627 return err; 11628 } 11629 11630 return 0; 11631 } 11632 11633 static bool signed_add_overflows(s64 a, s64 b) 11634 { 11635 /* Do the add in u64, where overflow is well-defined */ 11636 s64 res = (s64)((u64)a + (u64)b); 11637 11638 if (b < 0) 11639 return res > a; 11640 return res < a; 11641 } 11642 11643 static bool signed_add32_overflows(s32 a, s32 b) 11644 { 11645 /* Do the add in u32, where overflow is well-defined */ 11646 s32 res = (s32)((u32)a + (u32)b); 11647 11648 if (b < 0) 11649 return res > a; 11650 return res < a; 11651 } 11652 11653 static bool signed_sub_overflows(s64 a, s64 b) 11654 { 11655 /* Do the sub in u64, where overflow is well-defined */ 11656 s64 res = (s64)((u64)a - (u64)b); 11657 11658 if (b < 0) 11659 return res < a; 11660 return res > a; 11661 } 11662 11663 static bool signed_sub32_overflows(s32 a, s32 b) 11664 { 11665 /* Do the sub in u32, where overflow is well-defined */ 11666 s32 res = (s32)((u32)a - (u32)b); 11667 11668 if (b < 0) 11669 return res < a; 11670 return res > a; 11671 } 11672 11673 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 11674 const struct bpf_reg_state *reg, 11675 enum bpf_reg_type type) 11676 { 11677 bool known = tnum_is_const(reg->var_off); 11678 s64 val = reg->var_off.value; 11679 s64 smin = reg->smin_value; 11680 11681 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 11682 verbose(env, "math between %s pointer and %lld is not allowed\n", 11683 reg_type_str(env, type), val); 11684 return false; 11685 } 11686 11687 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 11688 verbose(env, "%s pointer offset %d is not allowed\n", 11689 reg_type_str(env, type), reg->off); 11690 return false; 11691 } 11692 11693 if (smin == S64_MIN) { 11694 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 11695 reg_type_str(env, type)); 11696 return false; 11697 } 11698 11699 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 11700 verbose(env, "value %lld makes %s pointer be out of bounds\n", 11701 smin, reg_type_str(env, type)); 11702 return false; 11703 } 11704 11705 return true; 11706 } 11707 11708 enum { 11709 REASON_BOUNDS = -1, 11710 REASON_TYPE = -2, 11711 REASON_PATHS = -3, 11712 REASON_LIMIT = -4, 11713 REASON_STACK = -5, 11714 }; 11715 11716 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 11717 u32 *alu_limit, bool mask_to_left) 11718 { 11719 u32 max = 0, ptr_limit = 0; 11720 11721 switch (ptr_reg->type) { 11722 case PTR_TO_STACK: 11723 /* Offset 0 is out-of-bounds, but acceptable start for the 11724 * left direction, see BPF_REG_FP. Also, unknown scalar 11725 * offset where we would need to deal with min/max bounds is 11726 * currently prohibited for unprivileged. 11727 */ 11728 max = MAX_BPF_STACK + mask_to_left; 11729 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); 11730 break; 11731 case PTR_TO_MAP_VALUE: 11732 max = ptr_reg->map_ptr->value_size; 11733 ptr_limit = (mask_to_left ? 
11734 ptr_reg->smin_value : 11735 ptr_reg->umax_value) + ptr_reg->off; 11736 break; 11737 default: 11738 return REASON_TYPE; 11739 } 11740 11741 if (ptr_limit >= max) 11742 return REASON_LIMIT; 11743 *alu_limit = ptr_limit; 11744 return 0; 11745 } 11746 11747 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 11748 const struct bpf_insn *insn) 11749 { 11750 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 11751 } 11752 11753 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 11754 u32 alu_state, u32 alu_limit) 11755 { 11756 /* If we arrived here from different branches with different 11757 * state or limits to sanitize, then this won't work. 11758 */ 11759 if (aux->alu_state && 11760 (aux->alu_state != alu_state || 11761 aux->alu_limit != alu_limit)) 11762 return REASON_PATHS; 11763 11764 /* Corresponding fixup done in do_misc_fixups(). */ 11765 aux->alu_state = alu_state; 11766 aux->alu_limit = alu_limit; 11767 return 0; 11768 } 11769 11770 static int sanitize_val_alu(struct bpf_verifier_env *env, 11771 struct bpf_insn *insn) 11772 { 11773 struct bpf_insn_aux_data *aux = cur_aux(env); 11774 11775 if (can_skip_alu_sanitation(env, insn)) 11776 return 0; 11777 11778 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 11779 } 11780 11781 static bool sanitize_needed(u8 opcode) 11782 { 11783 return opcode == BPF_ADD || opcode == BPF_SUB; 11784 } 11785 11786 struct bpf_sanitize_info { 11787 struct bpf_insn_aux_data aux; 11788 bool mask_to_left; 11789 }; 11790 11791 static struct bpf_verifier_state * 11792 sanitize_speculative_path(struct bpf_verifier_env *env, 11793 const struct bpf_insn *insn, 11794 u32 next_idx, u32 curr_idx) 11795 { 11796 struct bpf_verifier_state *branch; 11797 struct bpf_reg_state *regs; 11798 11799 branch = push_stack(env, next_idx, curr_idx, true); 11800 if (branch && insn) { 11801 regs = branch->frame[branch->curframe]->regs; 11802 if (BPF_SRC(insn->code) == BPF_K) { 11803 mark_reg_unknown(env, regs, insn->dst_reg); 11804 } else if (BPF_SRC(insn->code) == BPF_X) { 11805 mark_reg_unknown(env, regs, insn->dst_reg); 11806 mark_reg_unknown(env, regs, insn->src_reg); 11807 } 11808 } 11809 return branch; 11810 } 11811 11812 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 11813 struct bpf_insn *insn, 11814 const struct bpf_reg_state *ptr_reg, 11815 const struct bpf_reg_state *off_reg, 11816 struct bpf_reg_state *dst_reg, 11817 struct bpf_sanitize_info *info, 11818 const bool commit_window) 11819 { 11820 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; 11821 struct bpf_verifier_state *vstate = env->cur_state; 11822 bool off_is_imm = tnum_is_const(off_reg->var_off); 11823 bool off_is_neg = off_reg->smin_value < 0; 11824 bool ptr_is_dst_reg = ptr_reg == dst_reg; 11825 u8 opcode = BPF_OP(insn->code); 11826 u32 alu_state, alu_limit; 11827 struct bpf_reg_state tmp; 11828 bool ret; 11829 int err; 11830 11831 if (can_skip_alu_sanitation(env, insn)) 11832 return 0; 11833 11834 /* We already marked aux for masking from non-speculative 11835 * paths, thus we got here in the first place. We only care 11836 * to explore bad access from here. 
11837 */ 11838 if (vstate->speculative) 11839 goto do_sim; 11840 11841 if (!commit_window) { 11842 if (!tnum_is_const(off_reg->var_off) && 11843 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) 11844 return REASON_BOUNDS; 11845 11846 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || 11847 (opcode == BPF_SUB && !off_is_neg); 11848 } 11849 11850 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); 11851 if (err < 0) 11852 return err; 11853 11854 if (commit_window) { 11855 /* In commit phase we narrow the masking window based on 11856 * the observed pointer move after the simulated operation. 11857 */ 11858 alu_state = info->aux.alu_state; 11859 alu_limit = abs(info->aux.alu_limit - alu_limit); 11860 } else { 11861 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 11862 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; 11863 alu_state |= ptr_is_dst_reg ? 11864 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 11865 11866 /* Limit pruning on unknown scalars to enable deep search for 11867 * potential masking differences from other program paths. 11868 */ 11869 if (!off_is_imm) 11870 env->explore_alu_limits = true; 11871 } 11872 11873 err = update_alu_sanitation_state(aux, alu_state, alu_limit); 11874 if (err < 0) 11875 return err; 11876 do_sim: 11877 /* If we're in commit phase, we're done here given we already 11878 * pushed the truncated dst_reg into the speculative verification 11879 * stack. 11880 * 11881 * Also, when register is a known constant, we rewrite register-based 11882 * operation to immediate-based, and thus do not need masking (and as 11883 * a consequence, do not need to simulate the zero-truncation either). 11884 */ 11885 if (commit_window || off_is_imm) 11886 return 0; 11887 11888 /* Simulate and find potential out-of-bounds access under 11889 * speculative execution from truncation as a result of 11890 * masking when off was not within expected range. If off 11891 * sits in dst, then we temporarily need to move ptr there 11892 * to simulate dst (== 0) +/-= ptr. Needed, for example, 11893 * for cases where we use K-based arithmetic in one direction 11894 * and truncated reg-based in the other in order to explore 11895 * bad access. 11896 */ 11897 if (!ptr_is_dst_reg) { 11898 tmp = *dst_reg; 11899 copy_register_state(dst_reg, ptr_reg); 11900 } 11901 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, 11902 env->insn_idx); 11903 if (!ptr_is_dst_reg && ret) 11904 *dst_reg = tmp; 11905 return !ret ? REASON_STACK : 0; 11906 } 11907 11908 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) 11909 { 11910 struct bpf_verifier_state *vstate = env->cur_state; 11911 11912 /* If we simulate paths under speculation, we don't update the 11913 * insn as 'seen' such that when we verify unreachable paths in 11914 * the non-speculative domain, sanitize_dead_code() can still 11915 * rewrite/sanitize them. 11916 */ 11917 if (!vstate->speculative) 11918 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 11919 } 11920 11921 static int sanitize_err(struct bpf_verifier_env *env, 11922 const struct bpf_insn *insn, int reason, 11923 const struct bpf_reg_state *off_reg, 11924 const struct bpf_reg_state *dst_reg) 11925 { 11926 static const char *err = "pointer arithmetic with it prohibited for !root"; 11927 const char *op = BPF_OP(insn->code) == BPF_ADD ? 
"add" : "sub"; 11928 u32 dst = insn->dst_reg, src = insn->src_reg; 11929 11930 switch (reason) { 11931 case REASON_BOUNDS: 11932 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", 11933 off_reg == dst_reg ? dst : src, err); 11934 break; 11935 case REASON_TYPE: 11936 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", 11937 off_reg == dst_reg ? src : dst, err); 11938 break; 11939 case REASON_PATHS: 11940 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", 11941 dst, op, err); 11942 break; 11943 case REASON_LIMIT: 11944 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", 11945 dst, op, err); 11946 break; 11947 case REASON_STACK: 11948 verbose(env, "R%d could not be pushed for speculative verification, %s\n", 11949 dst, err); 11950 break; 11951 default: 11952 verbose(env, "verifier internal error: unknown reason (%d)\n", 11953 reason); 11954 break; 11955 } 11956 11957 return -EACCES; 11958 } 11959 11960 /* check that stack access falls within stack limits and that 'reg' doesn't 11961 * have a variable offset. 11962 * 11963 * Variable offset is prohibited for unprivileged mode for simplicity since it 11964 * requires corresponding support in Spectre masking for stack ALU. See also 11965 * retrieve_ptr_limit(). 11966 * 11967 * 11968 * 'off' includes 'reg->off'. 11969 */ 11970 static int check_stack_access_for_ptr_arithmetic( 11971 struct bpf_verifier_env *env, 11972 int regno, 11973 const struct bpf_reg_state *reg, 11974 int off) 11975 { 11976 if (!tnum_is_const(reg->var_off)) { 11977 char tn_buf[48]; 11978 11979 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 11980 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", 11981 regno, tn_buf, off); 11982 return -EACCES; 11983 } 11984 11985 if (off >= 0 || off < -MAX_BPF_STACK) { 11986 verbose(env, "R%d stack pointer arithmetic goes out of range, " 11987 "prohibited for !root; off=%d\n", regno, off); 11988 return -EACCES; 11989 } 11990 11991 return 0; 11992 } 11993 11994 static int sanitize_check_bounds(struct bpf_verifier_env *env, 11995 const struct bpf_insn *insn, 11996 const struct bpf_reg_state *dst_reg) 11997 { 11998 u32 dst = insn->dst_reg; 11999 12000 /* For unprivileged we require that resulting offset must be in bounds 12001 * in order to be able to sanitize access later on. 12002 */ 12003 if (env->bypass_spec_v1) 12004 return 0; 12005 12006 switch (dst_reg->type) { 12007 case PTR_TO_STACK: 12008 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, 12009 dst_reg->off + dst_reg->var_off.value)) 12010 return -EACCES; 12011 break; 12012 case PTR_TO_MAP_VALUE: 12013 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { 12014 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 12015 "prohibited for !root\n", dst); 12016 return -EACCES; 12017 } 12018 break; 12019 default: 12020 break; 12021 } 12022 12023 return 0; 12024 } 12025 12026 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 12027 * Caller should also handle BPF_MOV case separately. 12028 * If we return -EACCES, caller may want to try again treating pointer as a 12029 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
12030 */ 12031 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 12032 struct bpf_insn *insn, 12033 const struct bpf_reg_state *ptr_reg, 12034 const struct bpf_reg_state *off_reg) 12035 { 12036 struct bpf_verifier_state *vstate = env->cur_state; 12037 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 12038 struct bpf_reg_state *regs = state->regs, *dst_reg; 12039 bool known = tnum_is_const(off_reg->var_off); 12040 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 12041 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 12042 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 12043 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 12044 struct bpf_sanitize_info info = {}; 12045 u8 opcode = BPF_OP(insn->code); 12046 u32 dst = insn->dst_reg; 12047 int ret; 12048 12049 dst_reg = ®s[dst]; 12050 12051 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 12052 smin_val > smax_val || umin_val > umax_val) { 12053 /* Taint dst register if offset had invalid bounds derived from 12054 * e.g. dead branches. 12055 */ 12056 __mark_reg_unknown(env, dst_reg); 12057 return 0; 12058 } 12059 12060 if (BPF_CLASS(insn->code) != BPF_ALU64) { 12061 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 12062 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 12063 __mark_reg_unknown(env, dst_reg); 12064 return 0; 12065 } 12066 12067 verbose(env, 12068 "R%d 32-bit pointer arithmetic prohibited\n", 12069 dst); 12070 return -EACCES; 12071 } 12072 12073 if (ptr_reg->type & PTR_MAYBE_NULL) { 12074 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 12075 dst, reg_type_str(env, ptr_reg->type)); 12076 return -EACCES; 12077 } 12078 12079 switch (base_type(ptr_reg->type)) { 12080 case CONST_PTR_TO_MAP: 12081 /* smin_val represents the known value */ 12082 if (known && smin_val == 0 && opcode == BPF_ADD) 12083 break; 12084 fallthrough; 12085 case PTR_TO_PACKET_END: 12086 case PTR_TO_SOCKET: 12087 case PTR_TO_SOCK_COMMON: 12088 case PTR_TO_TCP_SOCK: 12089 case PTR_TO_XDP_SOCK: 12090 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 12091 dst, reg_type_str(env, ptr_reg->type)); 12092 return -EACCES; 12093 default: 12094 break; 12095 } 12096 12097 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 12098 * The id may be overwritten later if we create a new variable offset. 12099 */ 12100 dst_reg->type = ptr_reg->type; 12101 dst_reg->id = ptr_reg->id; 12102 12103 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 12104 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 12105 return -EINVAL; 12106 12107 /* pointer types do not carry 32-bit bounds at the moment. */ 12108 __mark_reg32_unbounded(dst_reg); 12109 12110 if (sanitize_needed(opcode)) { 12111 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, 12112 &info, false); 12113 if (ret < 0) 12114 return sanitize_err(env, insn, ret, off_reg, dst_reg); 12115 } 12116 12117 switch (opcode) { 12118 case BPF_ADD: 12119 /* We can take a fixed offset as long as it doesn't overflow 12120 * the s32 'off' field 12121 */ 12122 if (known && (ptr_reg->off + smin_val == 12123 (s64)(s32)(ptr_reg->off + smin_val))) { 12124 /* pointer += K. 
Accumulate it into fixed offset */ 12125 dst_reg->smin_value = smin_ptr; 12126 dst_reg->smax_value = smax_ptr; 12127 dst_reg->umin_value = umin_ptr; 12128 dst_reg->umax_value = umax_ptr; 12129 dst_reg->var_off = ptr_reg->var_off; 12130 dst_reg->off = ptr_reg->off + smin_val; 12131 dst_reg->raw = ptr_reg->raw; 12132 break; 12133 } 12134 /* A new variable offset is created. Note that off_reg->off 12135 * == 0, since it's a scalar. 12136 * dst_reg gets the pointer type and since some positive 12137 * integer value was added to the pointer, give it a new 'id' 12138 * if it's a PTR_TO_PACKET. 12139 * this creates a new 'base' pointer, off_reg (variable) gets 12140 * added into the variable offset, and we copy the fixed offset 12141 * from ptr_reg. 12142 */ 12143 if (signed_add_overflows(smin_ptr, smin_val) || 12144 signed_add_overflows(smax_ptr, smax_val)) { 12145 dst_reg->smin_value = S64_MIN; 12146 dst_reg->smax_value = S64_MAX; 12147 } else { 12148 dst_reg->smin_value = smin_ptr + smin_val; 12149 dst_reg->smax_value = smax_ptr + smax_val; 12150 } 12151 if (umin_ptr + umin_val < umin_ptr || 12152 umax_ptr + umax_val < umax_ptr) { 12153 dst_reg->umin_value = 0; 12154 dst_reg->umax_value = U64_MAX; 12155 } else { 12156 dst_reg->umin_value = umin_ptr + umin_val; 12157 dst_reg->umax_value = umax_ptr + umax_val; 12158 } 12159 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 12160 dst_reg->off = ptr_reg->off; 12161 dst_reg->raw = ptr_reg->raw; 12162 if (reg_is_pkt_pointer(ptr_reg)) { 12163 dst_reg->id = ++env->id_gen; 12164 /* something was added to pkt_ptr, set range to zero */ 12165 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 12166 } 12167 break; 12168 case BPF_SUB: 12169 if (dst_reg == off_reg) { 12170 /* scalar -= pointer. Creates an unknown scalar */ 12171 verbose(env, "R%d tried to subtract pointer from scalar\n", 12172 dst); 12173 return -EACCES; 12174 } 12175 /* We don't allow subtraction from FP, because (according to 12176 * test_verifier.c test "invalid fp arithmetic", JITs might not 12177 * be able to deal with it. 12178 */ 12179 if (ptr_reg->type == PTR_TO_STACK) { 12180 verbose(env, "R%d subtraction from stack pointer prohibited\n", 12181 dst); 12182 return -EACCES; 12183 } 12184 if (known && (ptr_reg->off - smin_val == 12185 (s64)(s32)(ptr_reg->off - smin_val))) { 12186 /* pointer -= K. Subtract it from fixed offset */ 12187 dst_reg->smin_value = smin_ptr; 12188 dst_reg->smax_value = smax_ptr; 12189 dst_reg->umin_value = umin_ptr; 12190 dst_reg->umax_value = umax_ptr; 12191 dst_reg->var_off = ptr_reg->var_off; 12192 dst_reg->id = ptr_reg->id; 12193 dst_reg->off = ptr_reg->off - smin_val; 12194 dst_reg->raw = ptr_reg->raw; 12195 break; 12196 } 12197 /* A new variable offset is created. If the subtrahend is known 12198 * nonnegative, then any reg->range we had before is still good. 
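 * (If the subtrahend may be negative, the pointer can move forward, which is
 * why the packet range is cleared below only when smin_val < 0.)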
12199 */ 12200 if (signed_sub_overflows(smin_ptr, smax_val) || 12201 signed_sub_overflows(smax_ptr, smin_val)) { 12202 /* Overflow possible, we know nothing */ 12203 dst_reg->smin_value = S64_MIN; 12204 dst_reg->smax_value = S64_MAX; 12205 } else { 12206 dst_reg->smin_value = smin_ptr - smax_val; 12207 dst_reg->smax_value = smax_ptr - smin_val; 12208 } 12209 if (umin_ptr < umax_val) { 12210 /* Overflow possible, we know nothing */ 12211 dst_reg->umin_value = 0; 12212 dst_reg->umax_value = U64_MAX; 12213 } else { 12214 /* Cannot overflow (as long as bounds are consistent) */ 12215 dst_reg->umin_value = umin_ptr - umax_val; 12216 dst_reg->umax_value = umax_ptr - umin_val; 12217 } 12218 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 12219 dst_reg->off = ptr_reg->off; 12220 dst_reg->raw = ptr_reg->raw; 12221 if (reg_is_pkt_pointer(ptr_reg)) { 12222 dst_reg->id = ++env->id_gen; 12223 /* something was added to pkt_ptr, set range to zero */ 12224 if (smin_val < 0) 12225 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 12226 } 12227 break; 12228 case BPF_AND: 12229 case BPF_OR: 12230 case BPF_XOR: 12231 /* bitwise ops on pointers are troublesome, prohibit. */ 12232 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 12233 dst, bpf_alu_string[opcode >> 4]); 12234 return -EACCES; 12235 default: 12236 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 12237 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 12238 dst, bpf_alu_string[opcode >> 4]); 12239 return -EACCES; 12240 } 12241 12242 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 12243 return -EINVAL; 12244 reg_bounds_sync(dst_reg); 12245 if (sanitize_check_bounds(env, insn, dst_reg) < 0) 12246 return -EACCES; 12247 if (sanitize_needed(opcode)) { 12248 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, 12249 &info, true); 12250 if (ret < 0) 12251 return sanitize_err(env, insn, ret, off_reg, dst_reg); 12252 } 12253 12254 return 0; 12255 } 12256 12257 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 12258 struct bpf_reg_state *src_reg) 12259 { 12260 s32 smin_val = src_reg->s32_min_value; 12261 s32 smax_val = src_reg->s32_max_value; 12262 u32 umin_val = src_reg->u32_min_value; 12263 u32 umax_val = src_reg->u32_max_value; 12264 12265 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 12266 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 12267 dst_reg->s32_min_value = S32_MIN; 12268 dst_reg->s32_max_value = S32_MAX; 12269 } else { 12270 dst_reg->s32_min_value += smin_val; 12271 dst_reg->s32_max_value += smax_val; 12272 } 12273 if (dst_reg->u32_min_value + umin_val < umin_val || 12274 dst_reg->u32_max_value + umax_val < umax_val) { 12275 dst_reg->u32_min_value = 0; 12276 dst_reg->u32_max_value = U32_MAX; 12277 } else { 12278 dst_reg->u32_min_value += umin_val; 12279 dst_reg->u32_max_value += umax_val; 12280 } 12281 } 12282 12283 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 12284 struct bpf_reg_state *src_reg) 12285 { 12286 s64 smin_val = src_reg->smin_value; 12287 s64 smax_val = src_reg->smax_value; 12288 u64 umin_val = src_reg->umin_value; 12289 u64 umax_val = src_reg->umax_value; 12290 12291 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 12292 signed_add_overflows(dst_reg->smax_value, smax_val)) { 12293 dst_reg->smin_value = S64_MIN; 12294 dst_reg->smax_value = S64_MAX; 12295 } else { 12296 dst_reg->smin_value += smin_val; 12297 dst_reg->smax_value += smax_val; 12298 } 12299 if (dst_reg->umin_value + 
umin_val < umin_val || 12300 dst_reg->umax_value + umax_val < umax_val) { 12301 dst_reg->umin_value = 0; 12302 dst_reg->umax_value = U64_MAX; 12303 } else { 12304 dst_reg->umin_value += umin_val; 12305 dst_reg->umax_value += umax_val; 12306 } 12307 } 12308 12309 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 12310 struct bpf_reg_state *src_reg) 12311 { 12312 s32 smin_val = src_reg->s32_min_value; 12313 s32 smax_val = src_reg->s32_max_value; 12314 u32 umin_val = src_reg->u32_min_value; 12315 u32 umax_val = src_reg->u32_max_value; 12316 12317 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 12318 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 12319 /* Overflow possible, we know nothing */ 12320 dst_reg->s32_min_value = S32_MIN; 12321 dst_reg->s32_max_value = S32_MAX; 12322 } else { 12323 dst_reg->s32_min_value -= smax_val; 12324 dst_reg->s32_max_value -= smin_val; 12325 } 12326 if (dst_reg->u32_min_value < umax_val) { 12327 /* Overflow possible, we know nothing */ 12328 dst_reg->u32_min_value = 0; 12329 dst_reg->u32_max_value = U32_MAX; 12330 } else { 12331 /* Cannot overflow (as long as bounds are consistent) */ 12332 dst_reg->u32_min_value -= umax_val; 12333 dst_reg->u32_max_value -= umin_val; 12334 } 12335 } 12336 12337 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 12338 struct bpf_reg_state *src_reg) 12339 { 12340 s64 smin_val = src_reg->smin_value; 12341 s64 smax_val = src_reg->smax_value; 12342 u64 umin_val = src_reg->umin_value; 12343 u64 umax_val = src_reg->umax_value; 12344 12345 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 12346 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 12347 /* Overflow possible, we know nothing */ 12348 dst_reg->smin_value = S64_MIN; 12349 dst_reg->smax_value = S64_MAX; 12350 } else { 12351 dst_reg->smin_value -= smax_val; 12352 dst_reg->smax_value -= smin_val; 12353 } 12354 if (dst_reg->umin_value < umax_val) { 12355 /* Overflow possible, we know nothing */ 12356 dst_reg->umin_value = 0; 12357 dst_reg->umax_value = U64_MAX; 12358 } else { 12359 /* Cannot overflow (as long as bounds are consistent) */ 12360 dst_reg->umin_value -= umax_val; 12361 dst_reg->umax_value -= umin_val; 12362 } 12363 } 12364 12365 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 12366 struct bpf_reg_state *src_reg) 12367 { 12368 s32 smin_val = src_reg->s32_min_value; 12369 u32 umin_val = src_reg->u32_min_value; 12370 u32 umax_val = src_reg->u32_max_value; 12371 12372 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 12373 /* Ain't nobody got time to multiply that sign */ 12374 __mark_reg32_unbounded(dst_reg); 12375 return; 12376 } 12377 /* Both values are positive, so we can work with unsigned and 12378 * copy the result to signed (unless it exceeds S32_MAX). 
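 * Restricting both operands to at most U16_MAX guarantees that the u32
 * multiplication below cannot wrap (U16_MAX * U16_MAX < U32_MAX); anything
 * larger is conservatively marked unbounded.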
12379 */ 12380 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 12381 /* Potential overflow, we know nothing */ 12382 __mark_reg32_unbounded(dst_reg); 12383 return; 12384 } 12385 dst_reg->u32_min_value *= umin_val; 12386 dst_reg->u32_max_value *= umax_val; 12387 if (dst_reg->u32_max_value > S32_MAX) { 12388 /* Overflow possible, we know nothing */ 12389 dst_reg->s32_min_value = S32_MIN; 12390 dst_reg->s32_max_value = S32_MAX; 12391 } else { 12392 dst_reg->s32_min_value = dst_reg->u32_min_value; 12393 dst_reg->s32_max_value = dst_reg->u32_max_value; 12394 } 12395 } 12396 12397 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 12398 struct bpf_reg_state *src_reg) 12399 { 12400 s64 smin_val = src_reg->smin_value; 12401 u64 umin_val = src_reg->umin_value; 12402 u64 umax_val = src_reg->umax_value; 12403 12404 if (smin_val < 0 || dst_reg->smin_value < 0) { 12405 /* Ain't nobody got time to multiply that sign */ 12406 __mark_reg64_unbounded(dst_reg); 12407 return; 12408 } 12409 /* Both values are positive, so we can work with unsigned and 12410 * copy the result to signed (unless it exceeds S64_MAX). 12411 */ 12412 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 12413 /* Potential overflow, we know nothing */ 12414 __mark_reg64_unbounded(dst_reg); 12415 return; 12416 } 12417 dst_reg->umin_value *= umin_val; 12418 dst_reg->umax_value *= umax_val; 12419 if (dst_reg->umax_value > S64_MAX) { 12420 /* Overflow possible, we know nothing */ 12421 dst_reg->smin_value = S64_MIN; 12422 dst_reg->smax_value = S64_MAX; 12423 } else { 12424 dst_reg->smin_value = dst_reg->umin_value; 12425 dst_reg->smax_value = dst_reg->umax_value; 12426 } 12427 } 12428 12429 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 12430 struct bpf_reg_state *src_reg) 12431 { 12432 bool src_known = tnum_subreg_is_const(src_reg->var_off); 12433 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 12434 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 12435 s32 smin_val = src_reg->s32_min_value; 12436 u32 umax_val = src_reg->u32_max_value; 12437 12438 if (src_known && dst_known) { 12439 __mark_reg32_known(dst_reg, var32_off.value); 12440 return; 12441 } 12442 12443 /* We get our minimum from the var_off, since that's inherently 12444 * bitwise. Our maximum is the minimum of the operands' maxima. 12445 */ 12446 dst_reg->u32_min_value = var32_off.value; 12447 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 12448 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 12449 /* Lose signed bounds when ANDing negative numbers, 12450 * ain't nobody got time for that. 12451 */ 12452 dst_reg->s32_min_value = S32_MIN; 12453 dst_reg->s32_max_value = S32_MAX; 12454 } else { 12455 /* ANDing two positives gives a positive, so safe to 12456 * cast result into s64. 12457 */ 12458 dst_reg->s32_min_value = dst_reg->u32_min_value; 12459 dst_reg->s32_max_value = dst_reg->u32_max_value; 12460 } 12461 } 12462 12463 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 12464 struct bpf_reg_state *src_reg) 12465 { 12466 bool src_known = tnum_is_const(src_reg->var_off); 12467 bool dst_known = tnum_is_const(dst_reg->var_off); 12468 s64 smin_val = src_reg->smin_value; 12469 u64 umax_val = src_reg->umax_value; 12470 12471 if (src_known && dst_known) { 12472 __mark_reg_known(dst_reg, dst_reg->var_off.value); 12473 return; 12474 } 12475 12476 /* We get our minimum from the var_off, since that's inherently 12477 * bitwise. Our maximum is the minimum of the operands' maxima. 
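 *
 * For example, ANDing a register known only to be <= 0xff with the
 * constant 0xf0 leaves no bit known to be set, so umin_value stays
 * 0, while umax_value becomes min(0xff, 0xf0) = 0xf0; with both
 * operands nonnegative, the signed bounds end up as [0, 0xf0] too.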
12478 */ 12479 dst_reg->umin_value = dst_reg->var_off.value; 12480 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 12481 if (dst_reg->smin_value < 0 || smin_val < 0) { 12482 /* Lose signed bounds when ANDing negative numbers, 12483 * ain't nobody got time for that. 12484 */ 12485 dst_reg->smin_value = S64_MIN; 12486 dst_reg->smax_value = S64_MAX; 12487 } else { 12488 /* ANDing two positives gives a positive, so safe to 12489 * cast result into s64. 12490 */ 12491 dst_reg->smin_value = dst_reg->umin_value; 12492 dst_reg->smax_value = dst_reg->umax_value; 12493 } 12494 /* We may learn something more from the var_off */ 12495 __update_reg_bounds(dst_reg); 12496 } 12497 12498 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 12499 struct bpf_reg_state *src_reg) 12500 { 12501 bool src_known = tnum_subreg_is_const(src_reg->var_off); 12502 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 12503 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 12504 s32 smin_val = src_reg->s32_min_value; 12505 u32 umin_val = src_reg->u32_min_value; 12506 12507 if (src_known && dst_known) { 12508 __mark_reg32_known(dst_reg, var32_off.value); 12509 return; 12510 } 12511 12512 /* We get our maximum from the var_off, and our minimum is the 12513 * maximum of the operands' minima 12514 */ 12515 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 12516 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 12517 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 12518 /* Lose signed bounds when ORing negative numbers, 12519 * ain't nobody got time for that. 12520 */ 12521 dst_reg->s32_min_value = S32_MIN; 12522 dst_reg->s32_max_value = S32_MAX; 12523 } else { 12524 /* ORing two positives gives a positive, so safe to 12525 * cast result into s64. 12526 */ 12527 dst_reg->s32_min_value = dst_reg->u32_min_value; 12528 dst_reg->s32_max_value = dst_reg->u32_max_value; 12529 } 12530 } 12531 12532 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 12533 struct bpf_reg_state *src_reg) 12534 { 12535 bool src_known = tnum_is_const(src_reg->var_off); 12536 bool dst_known = tnum_is_const(dst_reg->var_off); 12537 s64 smin_val = src_reg->smin_value; 12538 u64 umin_val = src_reg->umin_value; 12539 12540 if (src_known && dst_known) { 12541 __mark_reg_known(dst_reg, dst_reg->var_off.value); 12542 return; 12543 } 12544 12545 /* We get our maximum from the var_off, and our minimum is the 12546 * maximum of the operands' minima 12547 */ 12548 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 12549 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 12550 if (dst_reg->smin_value < 0 || smin_val < 0) { 12551 /* Lose signed bounds when ORing negative numbers, 12552 * ain't nobody got time for that. 12553 */ 12554 dst_reg->smin_value = S64_MIN; 12555 dst_reg->smax_value = S64_MAX; 12556 } else { 12557 /* ORing two positives gives a positive, so safe to 12558 * cast result into s64. 
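 *
 * For example, a register in [0x10, 0x1f] OR'ed with the constant
 * 0x100 gets umin_value = max(0x10, 0x100) = 0x100 and
 * umax_value = 0x1f | 0x100 = 0x11f, and because both inputs are
 * nonnegative those bounds carry over to the signed range unchanged.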
12559 */ 12560 dst_reg->smin_value = dst_reg->umin_value; 12561 dst_reg->smax_value = dst_reg->umax_value; 12562 } 12563 /* We may learn something more from the var_off */ 12564 __update_reg_bounds(dst_reg); 12565 } 12566 12567 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 12568 struct bpf_reg_state *src_reg) 12569 { 12570 bool src_known = tnum_subreg_is_const(src_reg->var_off); 12571 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 12572 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 12573 s32 smin_val = src_reg->s32_min_value; 12574 12575 if (src_known && dst_known) { 12576 __mark_reg32_known(dst_reg, var32_off.value); 12577 return; 12578 } 12579 12580 /* We get both minimum and maximum from the var32_off. */ 12581 dst_reg->u32_min_value = var32_off.value; 12582 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 12583 12584 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 12585 /* XORing two positive sign numbers gives a positive, 12586 * so safe to cast u32 result into s32. 12587 */ 12588 dst_reg->s32_min_value = dst_reg->u32_min_value; 12589 dst_reg->s32_max_value = dst_reg->u32_max_value; 12590 } else { 12591 dst_reg->s32_min_value = S32_MIN; 12592 dst_reg->s32_max_value = S32_MAX; 12593 } 12594 } 12595 12596 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 12597 struct bpf_reg_state *src_reg) 12598 { 12599 bool src_known = tnum_is_const(src_reg->var_off); 12600 bool dst_known = tnum_is_const(dst_reg->var_off); 12601 s64 smin_val = src_reg->smin_value; 12602 12603 if (src_known && dst_known) { 12604 /* dst_reg->var_off.value has been updated earlier */ 12605 __mark_reg_known(dst_reg, dst_reg->var_off.value); 12606 return; 12607 } 12608 12609 /* We get both minimum and maximum from the var_off. */ 12610 dst_reg->umin_value = dst_reg->var_off.value; 12611 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 12612 12613 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 12614 /* XORing two positive sign numbers gives a positive, 12615 * so safe to cast u64 result into s64. 
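 *
 * For example, XORing a register known to lie in [0, 3] (only the
 * low two bits unknown) with the constant 12 yields a var_off with
 * value 12 and mask 3, i.e. bounds [12, 12 | 3] = [12, 15]; both
 * inputs are nonnegative, so the signed range is the same.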
12616 */ 12617 dst_reg->smin_value = dst_reg->umin_value; 12618 dst_reg->smax_value = dst_reg->umax_value; 12619 } else { 12620 dst_reg->smin_value = S64_MIN; 12621 dst_reg->smax_value = S64_MAX; 12622 } 12623 12624 __update_reg_bounds(dst_reg); 12625 } 12626 12627 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 12628 u64 umin_val, u64 umax_val) 12629 { 12630 /* We lose all sign bit information (except what we can pick 12631 * up from var_off) 12632 */ 12633 dst_reg->s32_min_value = S32_MIN; 12634 dst_reg->s32_max_value = S32_MAX; 12635 /* If we might shift our top bit out, then we know nothing */ 12636 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 12637 dst_reg->u32_min_value = 0; 12638 dst_reg->u32_max_value = U32_MAX; 12639 } else { 12640 dst_reg->u32_min_value <<= umin_val; 12641 dst_reg->u32_max_value <<= umax_val; 12642 } 12643 } 12644 12645 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 12646 struct bpf_reg_state *src_reg) 12647 { 12648 u32 umax_val = src_reg->u32_max_value; 12649 u32 umin_val = src_reg->u32_min_value; 12650 /* u32 alu operation will zext upper bits */ 12651 struct tnum subreg = tnum_subreg(dst_reg->var_off); 12652 12653 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 12654 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 12655 /* Not required but being careful mark reg64 bounds as unknown so 12656 * that we are forced to pick them up from tnum and zext later and 12657 * if some path skips this step we are still safe. 12658 */ 12659 __mark_reg64_unbounded(dst_reg); 12660 __update_reg32_bounds(dst_reg); 12661 } 12662 12663 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 12664 u64 umin_val, u64 umax_val) 12665 { 12666 /* Special case <<32 because it is a common compiler pattern to sign 12667 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 12668 * positive we know this shift will also be positive so we can track 12669 * bounds correctly. Otherwise we lose all sign bit information except 12670 * what we can pick up from var_off. Perhaps we can generalize this 12671 * later to shifts of any length. 
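 *
 * For example, a register whose 32-bit bounds are [0, 100] and that
 * is shifted left by exactly 32 keeps usable signed bounds,
 * smin_value = 0 and smax_value = 100 << 32, which is what the
 * "<<32 s>>32" idiom relies on.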
12672 */ 12673 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 12674 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 12675 else 12676 dst_reg->smax_value = S64_MAX; 12677 12678 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 12679 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 12680 else 12681 dst_reg->smin_value = S64_MIN; 12682 12683 /* If we might shift our top bit out, then we know nothing */ 12684 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 12685 dst_reg->umin_value = 0; 12686 dst_reg->umax_value = U64_MAX; 12687 } else { 12688 dst_reg->umin_value <<= umin_val; 12689 dst_reg->umax_value <<= umax_val; 12690 } 12691 } 12692 12693 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 12694 struct bpf_reg_state *src_reg) 12695 { 12696 u64 umax_val = src_reg->umax_value; 12697 u64 umin_val = src_reg->umin_value; 12698 12699 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 12700 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 12701 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 12702 12703 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 12704 /* We may learn something more from the var_off */ 12705 __update_reg_bounds(dst_reg); 12706 } 12707 12708 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 12709 struct bpf_reg_state *src_reg) 12710 { 12711 struct tnum subreg = tnum_subreg(dst_reg->var_off); 12712 u32 umax_val = src_reg->u32_max_value; 12713 u32 umin_val = src_reg->u32_min_value; 12714 12715 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 12716 * be negative, then either: 12717 * 1) src_reg might be zero, so the sign bit of the result is 12718 * unknown, so we lose our signed bounds 12719 * 2) it's known negative, thus the unsigned bounds capture the 12720 * signed bounds 12721 * 3) the signed bounds cross zero, so they tell us nothing 12722 * about the result 12723 * If the value in dst_reg is known nonnegative, then again the 12724 * unsigned bounds capture the signed bounds. 12725 * Thus, in all cases it suffices to blow away our signed bounds 12726 * and rely on inferring new ones from the unsigned bounds and 12727 * var_off of the result. 12728 */ 12729 dst_reg->s32_min_value = S32_MIN; 12730 dst_reg->s32_max_value = S32_MAX; 12731 12732 dst_reg->var_off = tnum_rshift(subreg, umin_val); 12733 dst_reg->u32_min_value >>= umax_val; 12734 dst_reg->u32_max_value >>= umin_val; 12735 12736 __mark_reg64_unbounded(dst_reg); 12737 __update_reg32_bounds(dst_reg); 12738 } 12739 12740 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 12741 struct bpf_reg_state *src_reg) 12742 { 12743 u64 umax_val = src_reg->umax_value; 12744 u64 umin_val = src_reg->umin_value; 12745 12746 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 12747 * be negative, then either: 12748 * 1) src_reg might be zero, so the sign bit of the result is 12749 * unknown, so we lose our signed bounds 12750 * 2) it's known negative, thus the unsigned bounds capture the 12751 * signed bounds 12752 * 3) the signed bounds cross zero, so they tell us nothing 12753 * about the result 12754 * If the value in dst_reg is known nonnegative, then again the 12755 * unsigned bounds capture the signed bounds. 12756 * Thus, in all cases it suffices to blow away our signed bounds 12757 * and rely on inferring new ones from the unsigned bounds and 12758 * var_off of the result. 
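 *
 * For example, with dst in [0x100, 0xfff] and a constant shift of 4,
 * the unsigned bounds become [0x100 >> 4, 0xfff >> 4] = [0x10, 0xff],
 * while the signed bounds are reset here and re-derived afterwards
 * from the unsigned bounds and var_off.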
12759 */ 12760 dst_reg->smin_value = S64_MIN; 12761 dst_reg->smax_value = S64_MAX; 12762 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 12763 dst_reg->umin_value >>= umax_val; 12764 dst_reg->umax_value >>= umin_val; 12765 12766 /* Its not easy to operate on alu32 bounds here because it depends 12767 * on bits being shifted in. Take easy way out and mark unbounded 12768 * so we can recalculate later from tnum. 12769 */ 12770 __mark_reg32_unbounded(dst_reg); 12771 __update_reg_bounds(dst_reg); 12772 } 12773 12774 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 12775 struct bpf_reg_state *src_reg) 12776 { 12777 u64 umin_val = src_reg->u32_min_value; 12778 12779 /* Upon reaching here, src_known is true and 12780 * umax_val is equal to umin_val. 12781 */ 12782 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 12783 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 12784 12785 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 12786 12787 /* blow away the dst_reg umin_value/umax_value and rely on 12788 * dst_reg var_off to refine the result. 12789 */ 12790 dst_reg->u32_min_value = 0; 12791 dst_reg->u32_max_value = U32_MAX; 12792 12793 __mark_reg64_unbounded(dst_reg); 12794 __update_reg32_bounds(dst_reg); 12795 } 12796 12797 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 12798 struct bpf_reg_state *src_reg) 12799 { 12800 u64 umin_val = src_reg->umin_value; 12801 12802 /* Upon reaching here, src_known is true and umax_val is equal 12803 * to umin_val. 12804 */ 12805 dst_reg->smin_value >>= umin_val; 12806 dst_reg->smax_value >>= umin_val; 12807 12808 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 12809 12810 /* blow away the dst_reg umin_value/umax_value and rely on 12811 * dst_reg var_off to refine the result. 12812 */ 12813 dst_reg->umin_value = 0; 12814 dst_reg->umax_value = U64_MAX; 12815 12816 /* Its not easy to operate on alu32 bounds here because it depends 12817 * on bits being shifted in from upper 32-bits. Take easy way out 12818 * and mark unbounded so we can recalculate later from tnum. 12819 */ 12820 __mark_reg32_unbounded(dst_reg); 12821 __update_reg_bounds(dst_reg); 12822 } 12823 12824 /* WARNING: This function does calculations on 64-bit values, but the actual 12825 * execution may occur on 32-bit values. Therefore, things like bitshifts 12826 * need extra checks in the 32-bit case. 12827 */ 12828 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 12829 struct bpf_insn *insn, 12830 struct bpf_reg_state *dst_reg, 12831 struct bpf_reg_state src_reg) 12832 { 12833 struct bpf_reg_state *regs = cur_regs(env); 12834 u8 opcode = BPF_OP(insn->code); 12835 bool src_known; 12836 s64 smin_val, smax_val; 12837 u64 umin_val, umax_val; 12838 s32 s32_min_val, s32_max_val; 12839 u32 u32_min_val, u32_max_val; 12840 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 
64 : 32; 12841 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 12842 int ret; 12843 12844 smin_val = src_reg.smin_value; 12845 smax_val = src_reg.smax_value; 12846 umin_val = src_reg.umin_value; 12847 umax_val = src_reg.umax_value; 12848 12849 s32_min_val = src_reg.s32_min_value; 12850 s32_max_val = src_reg.s32_max_value; 12851 u32_min_val = src_reg.u32_min_value; 12852 u32_max_val = src_reg.u32_max_value; 12853 12854 if (alu32) { 12855 src_known = tnum_subreg_is_const(src_reg.var_off); 12856 if ((src_known && 12857 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 12858 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 12859 /* Taint dst register if offset had invalid bounds 12860 * derived from e.g. dead branches. 12861 */ 12862 __mark_reg_unknown(env, dst_reg); 12863 return 0; 12864 } 12865 } else { 12866 src_known = tnum_is_const(src_reg.var_off); 12867 if ((src_known && 12868 (smin_val != smax_val || umin_val != umax_val)) || 12869 smin_val > smax_val || umin_val > umax_val) { 12870 /* Taint dst register if offset had invalid bounds 12871 * derived from e.g. dead branches. 12872 */ 12873 __mark_reg_unknown(env, dst_reg); 12874 return 0; 12875 } 12876 } 12877 12878 if (!src_known && 12879 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 12880 __mark_reg_unknown(env, dst_reg); 12881 return 0; 12882 } 12883 12884 if (sanitize_needed(opcode)) { 12885 ret = sanitize_val_alu(env, insn); 12886 if (ret < 0) 12887 return sanitize_err(env, insn, ret, NULL, NULL); 12888 } 12889 12890 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 12891 * There are two classes of instructions: The first class we track both 12892 * alu32 and alu64 sign/unsigned bounds independently this provides the 12893 * greatest amount of precision when alu operations are mixed with jmp32 12894 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_ADD, 12895 * and BPF_OR. This is possible because these ops have fairly easy to 12896 * understand and calculate behavior in both 32-bit and 64-bit alu ops. 12897 * See alu32 verifier tests for examples. The second class of 12898 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy 12899 * with regards to tracking sign/unsigned bounds because the bits may 12900 * cross subreg boundaries in the alu64 case. When this happens we mark 12901 * the reg unbounded in the subreg bound space and use the resulting 12902 * tnum to calculate an approximation of the sign/unsigned bounds. 
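 *
 * For instance, a 64-bit right shift drags bits from the upper half
 * into the low 32-bit subreg, so subreg bounds tracked before the
 * shift say nothing about it afterwards; the 64-bit shift helpers
 * mark the subreg unbounded and re-derive what they can from the
 * resulting tnum.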
12903 */ 12904 switch (opcode) { 12905 case BPF_ADD: 12906 scalar32_min_max_add(dst_reg, &src_reg); 12907 scalar_min_max_add(dst_reg, &src_reg); 12908 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 12909 break; 12910 case BPF_SUB: 12911 scalar32_min_max_sub(dst_reg, &src_reg); 12912 scalar_min_max_sub(dst_reg, &src_reg); 12913 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 12914 break; 12915 case BPF_MUL: 12916 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 12917 scalar32_min_max_mul(dst_reg, &src_reg); 12918 scalar_min_max_mul(dst_reg, &src_reg); 12919 break; 12920 case BPF_AND: 12921 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 12922 scalar32_min_max_and(dst_reg, &src_reg); 12923 scalar_min_max_and(dst_reg, &src_reg); 12924 break; 12925 case BPF_OR: 12926 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 12927 scalar32_min_max_or(dst_reg, &src_reg); 12928 scalar_min_max_or(dst_reg, &src_reg); 12929 break; 12930 case BPF_XOR: 12931 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 12932 scalar32_min_max_xor(dst_reg, &src_reg); 12933 scalar_min_max_xor(dst_reg, &src_reg); 12934 break; 12935 case BPF_LSH: 12936 if (umax_val >= insn_bitness) { 12937 /* Shifts greater than 31 or 63 are undefined. 12938 * This includes shifts by a negative number. 12939 */ 12940 mark_reg_unknown(env, regs, insn->dst_reg); 12941 break; 12942 } 12943 if (alu32) 12944 scalar32_min_max_lsh(dst_reg, &src_reg); 12945 else 12946 scalar_min_max_lsh(dst_reg, &src_reg); 12947 break; 12948 case BPF_RSH: 12949 if (umax_val >= insn_bitness) { 12950 /* Shifts greater than 31 or 63 are undefined. 12951 * This includes shifts by a negative number. 12952 */ 12953 mark_reg_unknown(env, regs, insn->dst_reg); 12954 break; 12955 } 12956 if (alu32) 12957 scalar32_min_max_rsh(dst_reg, &src_reg); 12958 else 12959 scalar_min_max_rsh(dst_reg, &src_reg); 12960 break; 12961 case BPF_ARSH: 12962 if (umax_val >= insn_bitness) { 12963 /* Shifts greater than 31 or 63 are undefined. 12964 * This includes shifts by a negative number. 12965 */ 12966 mark_reg_unknown(env, regs, insn->dst_reg); 12967 break; 12968 } 12969 if (alu32) 12970 scalar32_min_max_arsh(dst_reg, &src_reg); 12971 else 12972 scalar_min_max_arsh(dst_reg, &src_reg); 12973 break; 12974 default: 12975 mark_reg_unknown(env, regs, insn->dst_reg); 12976 break; 12977 } 12978 12979 /* ALU32 ops are zero extended into 64bit register */ 12980 if (alu32) 12981 zext_32_to_64(dst_reg); 12982 reg_bounds_sync(dst_reg); 12983 return 0; 12984 } 12985 12986 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 12987 * and var_off. 
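 * Dispatches on the operand types: pointer +/- scalar is handled by
 * adjust_ptr_min_max_vals(), scalar op scalar by
 * adjust_scalar_min_max_vals(), and pointer-with-pointer arithmetic
 * is rejected, except for pointer subtraction with allow_ptr_leaks
 * set, in which case the result is simply an unknown scalar.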
12988 */ 12989 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 12990 struct bpf_insn *insn) 12991 { 12992 struct bpf_verifier_state *vstate = env->cur_state; 12993 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 12994 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 12995 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 12996 u8 opcode = BPF_OP(insn->code); 12997 int err; 12998 12999 dst_reg = ®s[insn->dst_reg]; 13000 src_reg = NULL; 13001 if (dst_reg->type != SCALAR_VALUE) 13002 ptr_reg = dst_reg; 13003 else 13004 /* Make sure ID is cleared otherwise dst_reg min/max could be 13005 * incorrectly propagated into other registers by find_equal_scalars() 13006 */ 13007 dst_reg->id = 0; 13008 if (BPF_SRC(insn->code) == BPF_X) { 13009 src_reg = ®s[insn->src_reg]; 13010 if (src_reg->type != SCALAR_VALUE) { 13011 if (dst_reg->type != SCALAR_VALUE) { 13012 /* Combining two pointers by any ALU op yields 13013 * an arbitrary scalar. Disallow all math except 13014 * pointer subtraction 13015 */ 13016 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 13017 mark_reg_unknown(env, regs, insn->dst_reg); 13018 return 0; 13019 } 13020 verbose(env, "R%d pointer %s pointer prohibited\n", 13021 insn->dst_reg, 13022 bpf_alu_string[opcode >> 4]); 13023 return -EACCES; 13024 } else { 13025 /* scalar += pointer 13026 * This is legal, but we have to reverse our 13027 * src/dest handling in computing the range 13028 */ 13029 err = mark_chain_precision(env, insn->dst_reg); 13030 if (err) 13031 return err; 13032 return adjust_ptr_min_max_vals(env, insn, 13033 src_reg, dst_reg); 13034 } 13035 } else if (ptr_reg) { 13036 /* pointer += scalar */ 13037 err = mark_chain_precision(env, insn->src_reg); 13038 if (err) 13039 return err; 13040 return adjust_ptr_min_max_vals(env, insn, 13041 dst_reg, src_reg); 13042 } else if (dst_reg->precise) { 13043 /* if dst_reg is precise, src_reg should be precise as well */ 13044 err = mark_chain_precision(env, insn->src_reg); 13045 if (err) 13046 return err; 13047 } 13048 } else { 13049 /* Pretend the src is a reg with a known value, since we only 13050 * need to be able to read from this state. 
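 * For example, "r1 += 4" materializes the immediate 4 in the local
 * off_reg as a fully known scalar, so the BPF_K case can reuse the
 * same pointer/scalar adjustment paths as BPF_X.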
13051 */ 13052 off_reg.type = SCALAR_VALUE; 13053 __mark_reg_known(&off_reg, insn->imm); 13054 src_reg = &off_reg; 13055 if (ptr_reg) /* pointer += K */ 13056 return adjust_ptr_min_max_vals(env, insn, 13057 ptr_reg, src_reg); 13058 } 13059 13060 /* Got here implies adding two SCALAR_VALUEs */ 13061 if (WARN_ON_ONCE(ptr_reg)) { 13062 print_verifier_state(env, state, true); 13063 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 13064 return -EINVAL; 13065 } 13066 if (WARN_ON(!src_reg)) { 13067 print_verifier_state(env, state, true); 13068 verbose(env, "verifier internal error: no src_reg\n"); 13069 return -EINVAL; 13070 } 13071 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 13072 } 13073 13074 /* check validity of 32-bit and 64-bit arithmetic operations */ 13075 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 13076 { 13077 struct bpf_reg_state *regs = cur_regs(env); 13078 u8 opcode = BPF_OP(insn->code); 13079 int err; 13080 13081 if (opcode == BPF_END || opcode == BPF_NEG) { 13082 if (opcode == BPF_NEG) { 13083 if (BPF_SRC(insn->code) != BPF_K || 13084 insn->src_reg != BPF_REG_0 || 13085 insn->off != 0 || insn->imm != 0) { 13086 verbose(env, "BPF_NEG uses reserved fields\n"); 13087 return -EINVAL; 13088 } 13089 } else { 13090 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 13091 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 13092 (BPF_CLASS(insn->code) == BPF_ALU64 && 13093 BPF_SRC(insn->code) != BPF_TO_LE)) { 13094 verbose(env, "BPF_END uses reserved fields\n"); 13095 return -EINVAL; 13096 } 13097 } 13098 13099 /* check src operand */ 13100 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 13101 if (err) 13102 return err; 13103 13104 if (is_pointer_value(env, insn->dst_reg)) { 13105 verbose(env, "R%d pointer arithmetic prohibited\n", 13106 insn->dst_reg); 13107 return -EACCES; 13108 } 13109 13110 /* check dest operand */ 13111 err = check_reg_arg(env, insn->dst_reg, DST_OP); 13112 if (err) 13113 return err; 13114 13115 } else if (opcode == BPF_MOV) { 13116 13117 if (BPF_SRC(insn->code) == BPF_X) { 13118 if (insn->imm != 0) { 13119 verbose(env, "BPF_MOV uses reserved fields\n"); 13120 return -EINVAL; 13121 } 13122 13123 if (BPF_CLASS(insn->code) == BPF_ALU) { 13124 if (insn->off != 0 && insn->off != 8 && insn->off != 16) { 13125 verbose(env, "BPF_MOV uses reserved fields\n"); 13126 return -EINVAL; 13127 } 13128 } else { 13129 if (insn->off != 0 && insn->off != 8 && insn->off != 16 && 13130 insn->off != 32) { 13131 verbose(env, "BPF_MOV uses reserved fields\n"); 13132 return -EINVAL; 13133 } 13134 } 13135 13136 /* check src operand */ 13137 err = check_reg_arg(env, insn->src_reg, SRC_OP); 13138 if (err) 13139 return err; 13140 } else { 13141 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 13142 verbose(env, "BPF_MOV uses reserved fields\n"); 13143 return -EINVAL; 13144 } 13145 } 13146 13147 /* check dest operand, mark as required later */ 13148 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 13149 if (err) 13150 return err; 13151 13152 if (BPF_SRC(insn->code) == BPF_X) { 13153 struct bpf_reg_state *src_reg = regs + insn->src_reg; 13154 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 13155 bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id && 13156 !tnum_is_const(src_reg->var_off); 13157 13158 if (BPF_CLASS(insn->code) == BPF_ALU64) { 13159 if (insn->off == 0) { 13160 /* case: R1 = R2 13161 * copy register state to dest reg 13162 */ 13163 if (need_id) 13164 /* Assign src and dst registers 
the same ID 13165 * that will be used by find_equal_scalars() 13166 * to propagate min/max range. 13167 */ 13168 src_reg->id = ++env->id_gen; 13169 copy_register_state(dst_reg, src_reg); 13170 dst_reg->live |= REG_LIVE_WRITTEN; 13171 dst_reg->subreg_def = DEF_NOT_SUBREG; 13172 } else { 13173 /* case: R1 = (s8, s16 s32)R2 */ 13174 if (is_pointer_value(env, insn->src_reg)) { 13175 verbose(env, 13176 "R%d sign-extension part of pointer\n", 13177 insn->src_reg); 13178 return -EACCES; 13179 } else if (src_reg->type == SCALAR_VALUE) { 13180 bool no_sext; 13181 13182 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); 13183 if (no_sext && need_id) 13184 src_reg->id = ++env->id_gen; 13185 copy_register_state(dst_reg, src_reg); 13186 if (!no_sext) 13187 dst_reg->id = 0; 13188 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); 13189 dst_reg->live |= REG_LIVE_WRITTEN; 13190 dst_reg->subreg_def = DEF_NOT_SUBREG; 13191 } else { 13192 mark_reg_unknown(env, regs, insn->dst_reg); 13193 } 13194 } 13195 } else { 13196 /* R1 = (u32) R2 */ 13197 if (is_pointer_value(env, insn->src_reg)) { 13198 verbose(env, 13199 "R%d partial copy of pointer\n", 13200 insn->src_reg); 13201 return -EACCES; 13202 } else if (src_reg->type == SCALAR_VALUE) { 13203 if (insn->off == 0) { 13204 bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX; 13205 13206 if (is_src_reg_u32 && need_id) 13207 src_reg->id = ++env->id_gen; 13208 copy_register_state(dst_reg, src_reg); 13209 /* Make sure ID is cleared if src_reg is not in u32 13210 * range otherwise dst_reg min/max could be incorrectly 13211 * propagated into src_reg by find_equal_scalars() 13212 */ 13213 if (!is_src_reg_u32) 13214 dst_reg->id = 0; 13215 dst_reg->live |= REG_LIVE_WRITTEN; 13216 dst_reg->subreg_def = env->insn_idx + 1; 13217 } else { 13218 /* case: W1 = (s8, s16)W2 */ 13219 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); 13220 13221 if (no_sext && need_id) 13222 src_reg->id = ++env->id_gen; 13223 copy_register_state(dst_reg, src_reg); 13224 if (!no_sext) 13225 dst_reg->id = 0; 13226 dst_reg->live |= REG_LIVE_WRITTEN; 13227 dst_reg->subreg_def = env->insn_idx + 1; 13228 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); 13229 } 13230 } else { 13231 mark_reg_unknown(env, regs, 13232 insn->dst_reg); 13233 } 13234 zext_32_to_64(dst_reg); 13235 reg_bounds_sync(dst_reg); 13236 } 13237 } else { 13238 /* case: R = imm 13239 * remember the value we stored into this reg 13240 */ 13241 /* clear any state __mark_reg_known doesn't set */ 13242 mark_reg_unknown(env, regs, insn->dst_reg); 13243 regs[insn->dst_reg].type = SCALAR_VALUE; 13244 if (BPF_CLASS(insn->code) == BPF_ALU64) { 13245 __mark_reg_known(regs + insn->dst_reg, 13246 insn->imm); 13247 } else { 13248 __mark_reg_known(regs + insn->dst_reg, 13249 (u32)insn->imm); 13250 } 13251 } 13252 13253 } else if (opcode > BPF_END) { 13254 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 13255 return -EINVAL; 13256 13257 } else { /* all other ALU ops: and, sub, xor, add, ... 
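 * The checks below validate the reserved fields, reject constant
 * division/modulo by zero and out-of-range constant shifts, and
 * then hand the bounds tracking off to adjust_reg_min_max_vals().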
*/ 13258 13259 if (BPF_SRC(insn->code) == BPF_X) { 13260 if (insn->imm != 0 || insn->off > 1 || 13261 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { 13262 verbose(env, "BPF_ALU uses reserved fields\n"); 13263 return -EINVAL; 13264 } 13265 /* check src1 operand */ 13266 err = check_reg_arg(env, insn->src_reg, SRC_OP); 13267 if (err) 13268 return err; 13269 } else { 13270 if (insn->src_reg != BPF_REG_0 || insn->off > 1 || 13271 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { 13272 verbose(env, "BPF_ALU uses reserved fields\n"); 13273 return -EINVAL; 13274 } 13275 } 13276 13277 /* check src2 operand */ 13278 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 13279 if (err) 13280 return err; 13281 13282 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 13283 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 13284 verbose(env, "div by zero\n"); 13285 return -EINVAL; 13286 } 13287 13288 if ((opcode == BPF_LSH || opcode == BPF_RSH || 13289 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 13290 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 13291 13292 if (insn->imm < 0 || insn->imm >= size) { 13293 verbose(env, "invalid shift %d\n", insn->imm); 13294 return -EINVAL; 13295 } 13296 } 13297 13298 /* check dest operand */ 13299 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 13300 if (err) 13301 return err; 13302 13303 return adjust_reg_min_max_vals(env, insn); 13304 } 13305 13306 return 0; 13307 } 13308 13309 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 13310 struct bpf_reg_state *dst_reg, 13311 enum bpf_reg_type type, 13312 bool range_right_open) 13313 { 13314 struct bpf_func_state *state; 13315 struct bpf_reg_state *reg; 13316 int new_range; 13317 13318 if (dst_reg->off < 0 || 13319 (dst_reg->off == 0 && range_right_open)) 13320 /* This doesn't give us any range */ 13321 return; 13322 13323 if (dst_reg->umax_value > MAX_PACKET_OFF || 13324 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 13325 /* Risk of overflow. For instance, ptr + (1<<63) may be less 13326 * than pkt_end, but that's because it's also less than pkt. 13327 */ 13328 return; 13329 13330 new_range = dst_reg->off; 13331 if (range_right_open) 13332 new_range++; 13333 13334 /* Examples for register markings: 13335 * 13336 * pkt_data in dst register: 13337 * 13338 * r2 = r3; 13339 * r2 += 8; 13340 * if (r2 > pkt_end) goto <handle exception> 13341 * <access okay> 13342 * 13343 * r2 = r3; 13344 * r2 += 8; 13345 * if (r2 < pkt_end) goto <access okay> 13346 * <handle exception> 13347 * 13348 * Where: 13349 * r2 == dst_reg, pkt_end == src_reg 13350 * r2=pkt(id=n,off=8,r=0) 13351 * r3=pkt(id=n,off=0,r=0) 13352 * 13353 * pkt_data in src register: 13354 * 13355 * r2 = r3; 13356 * r2 += 8; 13357 * if (pkt_end >= r2) goto <access okay> 13358 * <handle exception> 13359 * 13360 * r2 = r3; 13361 * r2 += 8; 13362 * if (pkt_end <= r2) goto <handle exception> 13363 * <access okay> 13364 * 13365 * Where: 13366 * pkt_end == dst_reg, r2 == src_reg 13367 * r2=pkt(id=n,off=8,r=0) 13368 * r3=pkt(id=n,off=0,r=0) 13369 * 13370 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 13371 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 13372 * and [r3, r3 + 8-1) respectively is safe to access depending on 13373 * the check. 13374 */ 13375 13376 /* If our ids match, then we must have the same max_value. And we 13377 * don't care about the other reg's fixed offset, since if it's too big 13378 * the range won't allow anything. 
13379 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 13380 */ 13381 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 13382 if (reg->type == type && reg->id == dst_reg->id) 13383 /* keep the maximum range already checked */ 13384 reg->range = max(reg->range, new_range); 13385 })); 13386 } 13387 13388 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 13389 { 13390 struct tnum subreg = tnum_subreg(reg->var_off); 13391 s32 sval = (s32)val; 13392 13393 switch (opcode) { 13394 case BPF_JEQ: 13395 if (tnum_is_const(subreg)) 13396 return !!tnum_equals_const(subreg, val); 13397 else if (val < reg->u32_min_value || val > reg->u32_max_value) 13398 return 0; 13399 break; 13400 case BPF_JNE: 13401 if (tnum_is_const(subreg)) 13402 return !tnum_equals_const(subreg, val); 13403 else if (val < reg->u32_min_value || val > reg->u32_max_value) 13404 return 1; 13405 break; 13406 case BPF_JSET: 13407 if ((~subreg.mask & subreg.value) & val) 13408 return 1; 13409 if (!((subreg.mask | subreg.value) & val)) 13410 return 0; 13411 break; 13412 case BPF_JGT: 13413 if (reg->u32_min_value > val) 13414 return 1; 13415 else if (reg->u32_max_value <= val) 13416 return 0; 13417 break; 13418 case BPF_JSGT: 13419 if (reg->s32_min_value > sval) 13420 return 1; 13421 else if (reg->s32_max_value <= sval) 13422 return 0; 13423 break; 13424 case BPF_JLT: 13425 if (reg->u32_max_value < val) 13426 return 1; 13427 else if (reg->u32_min_value >= val) 13428 return 0; 13429 break; 13430 case BPF_JSLT: 13431 if (reg->s32_max_value < sval) 13432 return 1; 13433 else if (reg->s32_min_value >= sval) 13434 return 0; 13435 break; 13436 case BPF_JGE: 13437 if (reg->u32_min_value >= val) 13438 return 1; 13439 else if (reg->u32_max_value < val) 13440 return 0; 13441 break; 13442 case BPF_JSGE: 13443 if (reg->s32_min_value >= sval) 13444 return 1; 13445 else if (reg->s32_max_value < sval) 13446 return 0; 13447 break; 13448 case BPF_JLE: 13449 if (reg->u32_max_value <= val) 13450 return 1; 13451 else if (reg->u32_min_value > val) 13452 return 0; 13453 break; 13454 case BPF_JSLE: 13455 if (reg->s32_max_value <= sval) 13456 return 1; 13457 else if (reg->s32_min_value > sval) 13458 return 0; 13459 break; 13460 } 13461 13462 return -1; 13463 } 13464 13465 13466 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 13467 { 13468 s64 sval = (s64)val; 13469 13470 switch (opcode) { 13471 case BPF_JEQ: 13472 if (tnum_is_const(reg->var_off)) 13473 return !!tnum_equals_const(reg->var_off, val); 13474 else if (val < reg->umin_value || val > reg->umax_value) 13475 return 0; 13476 break; 13477 case BPF_JNE: 13478 if (tnum_is_const(reg->var_off)) 13479 return !tnum_equals_const(reg->var_off, val); 13480 else if (val < reg->umin_value || val > reg->umax_value) 13481 return 1; 13482 break; 13483 case BPF_JSET: 13484 if ((~reg->var_off.mask & reg->var_off.value) & val) 13485 return 1; 13486 if (!((reg->var_off.mask | reg->var_off.value) & val)) 13487 return 0; 13488 break; 13489 case BPF_JGT: 13490 if (reg->umin_value > val) 13491 return 1; 13492 else if (reg->umax_value <= val) 13493 return 0; 13494 break; 13495 case BPF_JSGT: 13496 if (reg->smin_value > sval) 13497 return 1; 13498 else if (reg->smax_value <= sval) 13499 return 0; 13500 break; 13501 case BPF_JLT: 13502 if (reg->umax_value < val) 13503 return 1; 13504 else if (reg->umin_value >= val) 13505 return 0; 13506 break; 13507 case BPF_JSLT: 13508 if (reg->smax_value < sval) 13509 return 1; 13510 else if (reg->smin_value >= sval) 13511 
return 0; 13512 break; 13513 case BPF_JGE: 13514 if (reg->umin_value >= val) 13515 return 1; 13516 else if (reg->umax_value < val) 13517 return 0; 13518 break; 13519 case BPF_JSGE: 13520 if (reg->smin_value >= sval) 13521 return 1; 13522 else if (reg->smax_value < sval) 13523 return 0; 13524 break; 13525 case BPF_JLE: 13526 if (reg->umax_value <= val) 13527 return 1; 13528 else if (reg->umin_value > val) 13529 return 0; 13530 break; 13531 case BPF_JSLE: 13532 if (reg->smax_value <= sval) 13533 return 1; 13534 else if (reg->smin_value > sval) 13535 return 0; 13536 break; 13537 } 13538 13539 return -1; 13540 } 13541 13542 /* compute branch direction of the expression "if (reg opcode val) goto target;" 13543 * and return: 13544 * 1 - branch will be taken and "goto target" will be executed 13545 * 0 - branch will not be taken and fall-through to next insn 13546 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value 13547 * range [0,10] 13548 */ 13549 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 13550 bool is_jmp32) 13551 { 13552 if (__is_pointer_value(false, reg)) { 13553 if (!reg_not_null(reg)) 13554 return -1; 13555 13556 /* If pointer is valid tests against zero will fail so we can 13557 * use this to direct branch taken. 13558 */ 13559 if (val != 0) 13560 return -1; 13561 13562 switch (opcode) { 13563 case BPF_JEQ: 13564 return 0; 13565 case BPF_JNE: 13566 return 1; 13567 default: 13568 return -1; 13569 } 13570 } 13571 13572 if (is_jmp32) 13573 return is_branch32_taken(reg, val, opcode); 13574 return is_branch64_taken(reg, val, opcode); 13575 } 13576 13577 static int flip_opcode(u32 opcode) 13578 { 13579 /* How can we transform "a <op> b" into "b <op> a"? */ 13580 static const u8 opcode_flip[16] = { 13581 /* these stay the same */ 13582 [BPF_JEQ >> 4] = BPF_JEQ, 13583 [BPF_JNE >> 4] = BPF_JNE, 13584 [BPF_JSET >> 4] = BPF_JSET, 13585 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 13586 [BPF_JGE >> 4] = BPF_JLE, 13587 [BPF_JGT >> 4] = BPF_JLT, 13588 [BPF_JLE >> 4] = BPF_JGE, 13589 [BPF_JLT >> 4] = BPF_JGT, 13590 [BPF_JSGE >> 4] = BPF_JSLE, 13591 [BPF_JSGT >> 4] = BPF_JSLT, 13592 [BPF_JSLE >> 4] = BPF_JSGE, 13593 [BPF_JSLT >> 4] = BPF_JSGT 13594 }; 13595 return opcode_flip[opcode >> 4]; 13596 } 13597 13598 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, 13599 struct bpf_reg_state *src_reg, 13600 u8 opcode) 13601 { 13602 struct bpf_reg_state *pkt; 13603 13604 if (src_reg->type == PTR_TO_PACKET_END) { 13605 pkt = dst_reg; 13606 } else if (dst_reg->type == PTR_TO_PACKET_END) { 13607 pkt = src_reg; 13608 opcode = flip_opcode(opcode); 13609 } else { 13610 return -1; 13611 } 13612 13613 if (pkt->range >= 0) 13614 return -1; 13615 13616 switch (opcode) { 13617 case BPF_JLE: 13618 /* pkt <= pkt_end */ 13619 fallthrough; 13620 case BPF_JGT: 13621 /* pkt > pkt_end */ 13622 if (pkt->range == BEYOND_PKT_END) 13623 /* pkt has at last one extra byte beyond pkt_end */ 13624 return opcode == BPF_JGT; 13625 break; 13626 case BPF_JLT: 13627 /* pkt < pkt_end */ 13628 fallthrough; 13629 case BPF_JGE: 13630 /* pkt >= pkt_end */ 13631 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) 13632 return opcode == BPF_JGE; 13633 break; 13634 } 13635 return -1; 13636 } 13637 13638 /* Adjusts the register min/max values in the case that the dst_reg is the 13639 * variable register that we are working on, and src_reg is a constant or we're 13640 * simply doing a BPF_K check. 13641 * In JEQ/JNE cases we also adjust the var_off values. 
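 *
 * For example, for "if r1 > 7 goto ..." with r1 currently in
 * [0, 100], the taken branch raises r1's umin_value to 8 while the
 * fall-through branch lowers its umax_value to 7.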
13642 */ 13643 static void reg_set_min_max(struct bpf_reg_state *true_reg, 13644 struct bpf_reg_state *false_reg, 13645 u64 val, u32 val32, 13646 u8 opcode, bool is_jmp32) 13647 { 13648 struct tnum false_32off = tnum_subreg(false_reg->var_off); 13649 struct tnum false_64off = false_reg->var_off; 13650 struct tnum true_32off = tnum_subreg(true_reg->var_off); 13651 struct tnum true_64off = true_reg->var_off; 13652 s64 sval = (s64)val; 13653 s32 sval32 = (s32)val32; 13654 13655 /* If the dst_reg is a pointer, we can't learn anything about its 13656 * variable offset from the compare (unless src_reg were a pointer into 13657 * the same object, but we don't bother with that. 13658 * Since false_reg and true_reg have the same type by construction, we 13659 * only need to check one of them for pointerness. 13660 */ 13661 if (__is_pointer_value(false, false_reg)) 13662 return; 13663 13664 switch (opcode) { 13665 /* JEQ/JNE comparison doesn't change the register equivalence. 13666 * 13667 * r1 = r2; 13668 * if (r1 == 42) goto label; 13669 * ... 13670 * label: // here both r1 and r2 are known to be 42. 13671 * 13672 * Hence when marking register as known preserve it's ID. 13673 */ 13674 case BPF_JEQ: 13675 if (is_jmp32) { 13676 __mark_reg32_known(true_reg, val32); 13677 true_32off = tnum_subreg(true_reg->var_off); 13678 } else { 13679 ___mark_reg_known(true_reg, val); 13680 true_64off = true_reg->var_off; 13681 } 13682 break; 13683 case BPF_JNE: 13684 if (is_jmp32) { 13685 __mark_reg32_known(false_reg, val32); 13686 false_32off = tnum_subreg(false_reg->var_off); 13687 } else { 13688 ___mark_reg_known(false_reg, val); 13689 false_64off = false_reg->var_off; 13690 } 13691 break; 13692 case BPF_JSET: 13693 if (is_jmp32) { 13694 false_32off = tnum_and(false_32off, tnum_const(~val32)); 13695 if (is_power_of_2(val32)) 13696 true_32off = tnum_or(true_32off, 13697 tnum_const(val32)); 13698 } else { 13699 false_64off = tnum_and(false_64off, tnum_const(~val)); 13700 if (is_power_of_2(val)) 13701 true_64off = tnum_or(true_64off, 13702 tnum_const(val)); 13703 } 13704 break; 13705 case BPF_JGE: 13706 case BPF_JGT: 13707 { 13708 if (is_jmp32) { 13709 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 13710 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 13711 13712 false_reg->u32_max_value = min(false_reg->u32_max_value, 13713 false_umax); 13714 true_reg->u32_min_value = max(true_reg->u32_min_value, 13715 true_umin); 13716 } else { 13717 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 13718 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 13719 13720 false_reg->umax_value = min(false_reg->umax_value, false_umax); 13721 true_reg->umin_value = max(true_reg->umin_value, true_umin); 13722 } 13723 break; 13724 } 13725 case BPF_JSGE: 13726 case BPF_JSGT: 13727 { 13728 if (is_jmp32) { 13729 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 13730 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 13731 13732 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 13733 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 13734 } else { 13735 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 13736 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 13737 13738 false_reg->smax_value = min(false_reg->smax_value, false_smax); 13739 true_reg->smin_value = max(true_reg->smin_value, true_smin); 13740 } 13741 break; 13742 } 13743 case BPF_JLE: 13744 case BPF_JLT: 13745 { 13746 if (is_jmp32) { 13747 u32 false_umin = opcode == BPF_JLT ? 
val32 : val32 + 1; 13748 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 13749 13750 false_reg->u32_min_value = max(false_reg->u32_min_value, 13751 false_umin); 13752 true_reg->u32_max_value = min(true_reg->u32_max_value, 13753 true_umax); 13754 } else { 13755 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 13756 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 13757 13758 false_reg->umin_value = max(false_reg->umin_value, false_umin); 13759 true_reg->umax_value = min(true_reg->umax_value, true_umax); 13760 } 13761 break; 13762 } 13763 case BPF_JSLE: 13764 case BPF_JSLT: 13765 { 13766 if (is_jmp32) { 13767 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 13768 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 13769 13770 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 13771 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 13772 } else { 13773 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 13774 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; 13775 13776 false_reg->smin_value = max(false_reg->smin_value, false_smin); 13777 true_reg->smax_value = min(true_reg->smax_value, true_smax); 13778 } 13779 break; 13780 } 13781 default: 13782 return; 13783 } 13784 13785 if (is_jmp32) { 13786 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 13787 tnum_subreg(false_32off)); 13788 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 13789 tnum_subreg(true_32off)); 13790 __reg_combine_32_into_64(false_reg); 13791 __reg_combine_32_into_64(true_reg); 13792 } else { 13793 false_reg->var_off = false_64off; 13794 true_reg->var_off = true_64off; 13795 __reg_combine_64_into_32(false_reg); 13796 __reg_combine_64_into_32(true_reg); 13797 } 13798 } 13799 13800 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 13801 * the variable reg. 13802 */ 13803 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 13804 struct bpf_reg_state *false_reg, 13805 u64 val, u32 val32, 13806 u8 opcode, bool is_jmp32) 13807 { 13808 opcode = flip_opcode(opcode); 13809 /* This uses zero as "not present in table"; luckily the zero opcode, 13810 * BPF_JA, can't get here. 
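 * For example, when the dst register holds a known constant, say
 * r2 == 5 in "if r2 < r1 goto ...", the test is equivalent to
 * "r1 > 5", so BPF_JLT is flipped to BPF_JGT and the variable
 * register is then adjusted by reg_set_min_max() as usual.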
13811 */ 13812 if (opcode) 13813 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 13814 } 13815 13816 /* Regs are known to be equal, so intersect their min/max/var_off */ 13817 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 13818 struct bpf_reg_state *dst_reg) 13819 { 13820 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 13821 dst_reg->umin_value); 13822 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 13823 dst_reg->umax_value); 13824 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 13825 dst_reg->smin_value); 13826 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 13827 dst_reg->smax_value); 13828 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 13829 dst_reg->var_off); 13830 reg_bounds_sync(src_reg); 13831 reg_bounds_sync(dst_reg); 13832 } 13833 13834 static void reg_combine_min_max(struct bpf_reg_state *true_src, 13835 struct bpf_reg_state *true_dst, 13836 struct bpf_reg_state *false_src, 13837 struct bpf_reg_state *false_dst, 13838 u8 opcode) 13839 { 13840 switch (opcode) { 13841 case BPF_JEQ: 13842 __reg_combine_min_max(true_src, true_dst); 13843 break; 13844 case BPF_JNE: 13845 __reg_combine_min_max(false_src, false_dst); 13846 break; 13847 } 13848 } 13849 13850 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 13851 struct bpf_reg_state *reg, u32 id, 13852 bool is_null) 13853 { 13854 if (type_may_be_null(reg->type) && reg->id == id && 13855 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { 13856 /* Old offset (both fixed and variable parts) should have been 13857 * known-zero, because we don't allow pointer arithmetic on 13858 * pointers that might be NULL. If we see this happening, don't 13859 * convert the register. 13860 * 13861 * But in some cases, some helpers that return local kptrs 13862 * advance offset for the returned pointer. In those cases, it 13863 * is fine to expect to see reg->off. 13864 */ 13865 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) 13866 return; 13867 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && 13868 WARN_ON_ONCE(reg->off)) 13869 return; 13870 13871 if (is_null) { 13872 reg->type = SCALAR_VALUE; 13873 /* We don't need id and ref_obj_id from this point 13874 * onwards anymore, thus we should better reset it, 13875 * so that state pruning has chances to take effect. 13876 */ 13877 reg->id = 0; 13878 reg->ref_obj_id = 0; 13879 13880 return; 13881 } 13882 13883 mark_ptr_not_null_reg(reg); 13884 13885 if (!reg_may_point_to_spin_lock(reg)) { 13886 /* For not-NULL ptr, reg->ref_obj_id will be reset 13887 * in release_reference(). 13888 * 13889 * reg->id is still used by spin_lock ptr. Other 13890 * than spin_lock ptr type, reg->id can be reset. 13891 */ 13892 reg->id = 0; 13893 } 13894 } 13895 } 13896 13897 /* The logic is similar to find_good_pkt_pointers(), both could eventually 13898 * be folded together at some point. 13899 */ 13900 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 13901 bool is_null) 13902 { 13903 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 13904 struct bpf_reg_state *regs = state->regs, *reg; 13905 u32 ref_obj_id = regs[regno].ref_obj_id; 13906 u32 id = regs[regno].id; 13907 13908 if (ref_obj_id && ref_obj_id == id && is_null) 13909 /* regs[regno] is in the " == NULL" branch. 13910 * No one could have freed the reference state before 13911 * doing the NULL check. 
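 * In other words, proving the pointer is NULL proves there is no
 * object left to release, so dropping the acquired-reference state
 * in this branch cannot leak anything.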
13912 */ 13913 WARN_ON_ONCE(release_reference_state(state, id)); 13914 13915 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 13916 mark_ptr_or_null_reg(state, reg, id, is_null); 13917 })); 13918 } 13919 13920 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 13921 struct bpf_reg_state *dst_reg, 13922 struct bpf_reg_state *src_reg, 13923 struct bpf_verifier_state *this_branch, 13924 struct bpf_verifier_state *other_branch) 13925 { 13926 if (BPF_SRC(insn->code) != BPF_X) 13927 return false; 13928 13929 /* Pointers are always 64-bit. */ 13930 if (BPF_CLASS(insn->code) == BPF_JMP32) 13931 return false; 13932 13933 switch (BPF_OP(insn->code)) { 13934 case BPF_JGT: 13935 if ((dst_reg->type == PTR_TO_PACKET && 13936 src_reg->type == PTR_TO_PACKET_END) || 13937 (dst_reg->type == PTR_TO_PACKET_META && 13938 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 13939 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 13940 find_good_pkt_pointers(this_branch, dst_reg, 13941 dst_reg->type, false); 13942 mark_pkt_end(other_branch, insn->dst_reg, true); 13943 } else if ((dst_reg->type == PTR_TO_PACKET_END && 13944 src_reg->type == PTR_TO_PACKET) || 13945 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 13946 src_reg->type == PTR_TO_PACKET_META)) { 13947 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 13948 find_good_pkt_pointers(other_branch, src_reg, 13949 src_reg->type, true); 13950 mark_pkt_end(this_branch, insn->src_reg, false); 13951 } else { 13952 return false; 13953 } 13954 break; 13955 case BPF_JLT: 13956 if ((dst_reg->type == PTR_TO_PACKET && 13957 src_reg->type == PTR_TO_PACKET_END) || 13958 (dst_reg->type == PTR_TO_PACKET_META && 13959 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 13960 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 13961 find_good_pkt_pointers(other_branch, dst_reg, 13962 dst_reg->type, true); 13963 mark_pkt_end(this_branch, insn->dst_reg, false); 13964 } else if ((dst_reg->type == PTR_TO_PACKET_END && 13965 src_reg->type == PTR_TO_PACKET) || 13966 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 13967 src_reg->type == PTR_TO_PACKET_META)) { 13968 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 13969 find_good_pkt_pointers(this_branch, src_reg, 13970 src_reg->type, false); 13971 mark_pkt_end(other_branch, insn->src_reg, true); 13972 } else { 13973 return false; 13974 } 13975 break; 13976 case BPF_JGE: 13977 if ((dst_reg->type == PTR_TO_PACKET && 13978 src_reg->type == PTR_TO_PACKET_END) || 13979 (dst_reg->type == PTR_TO_PACKET_META && 13980 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 13981 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 13982 find_good_pkt_pointers(this_branch, dst_reg, 13983 dst_reg->type, true); 13984 mark_pkt_end(other_branch, insn->dst_reg, false); 13985 } else if ((dst_reg->type == PTR_TO_PACKET_END && 13986 src_reg->type == PTR_TO_PACKET) || 13987 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 13988 src_reg->type == PTR_TO_PACKET_META)) { 13989 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 13990 find_good_pkt_pointers(other_branch, src_reg, 13991 src_reg->type, false); 13992 mark_pkt_end(this_branch, insn->src_reg, true); 13993 } else { 13994 return false; 13995 } 13996 break; 13997 case BPF_JLE: 13998 if ((dst_reg->type == PTR_TO_PACKET && 13999 src_reg->type == PTR_TO_PACKET_END) || 14000 (dst_reg->type == PTR_TO_PACKET_META && 14001 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 14002 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 14003 find_good_pkt_pointers(other_branch, dst_reg, 14004 
dst_reg->type, false); 14005 mark_pkt_end(this_branch, insn->dst_reg, true); 14006 } else if ((dst_reg->type == PTR_TO_PACKET_END && 14007 src_reg->type == PTR_TO_PACKET) || 14008 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 14009 src_reg->type == PTR_TO_PACKET_META)) { 14010 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 14011 find_good_pkt_pointers(this_branch, src_reg, 14012 src_reg->type, true); 14013 mark_pkt_end(other_branch, insn->src_reg, false); 14014 } else { 14015 return false; 14016 } 14017 break; 14018 default: 14019 return false; 14020 } 14021 14022 return true; 14023 } 14024 14025 static void find_equal_scalars(struct bpf_verifier_state *vstate, 14026 struct bpf_reg_state *known_reg) 14027 { 14028 struct bpf_func_state *state; 14029 struct bpf_reg_state *reg; 14030 14031 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 14032 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 14033 copy_register_state(reg, known_reg); 14034 })); 14035 } 14036 14037 static int check_cond_jmp_op(struct bpf_verifier_env *env, 14038 struct bpf_insn *insn, int *insn_idx) 14039 { 14040 struct bpf_verifier_state *this_branch = env->cur_state; 14041 struct bpf_verifier_state *other_branch; 14042 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 14043 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 14044 struct bpf_reg_state *eq_branch_regs; 14045 u8 opcode = BPF_OP(insn->code); 14046 bool is_jmp32; 14047 int pred = -1; 14048 int err; 14049 14050 /* Only conditional jumps are expected to reach here. */ 14051 if (opcode == BPF_JA || opcode > BPF_JSLE) { 14052 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 14053 return -EINVAL; 14054 } 14055 14056 /* check src2 operand */ 14057 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 14058 if (err) 14059 return err; 14060 14061 dst_reg = ®s[insn->dst_reg]; 14062 if (BPF_SRC(insn->code) == BPF_X) { 14063 if (insn->imm != 0) { 14064 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 14065 return -EINVAL; 14066 } 14067 14068 /* check src1 operand */ 14069 err = check_reg_arg(env, insn->src_reg, SRC_OP); 14070 if (err) 14071 return err; 14072 14073 src_reg = ®s[insn->src_reg]; 14074 if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) && 14075 is_pointer_value(env, insn->src_reg)) { 14076 verbose(env, "R%d pointer comparison prohibited\n", 14077 insn->src_reg); 14078 return -EACCES; 14079 } 14080 } else { 14081 if (insn->src_reg != BPF_REG_0) { 14082 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 14083 return -EINVAL; 14084 } 14085 } 14086 14087 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 14088 14089 if (BPF_SRC(insn->code) == BPF_K) { 14090 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); 14091 } else if (src_reg->type == SCALAR_VALUE && 14092 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { 14093 pred = is_branch_taken(dst_reg, 14094 tnum_subreg(src_reg->var_off).value, 14095 opcode, 14096 is_jmp32); 14097 } else if (src_reg->type == SCALAR_VALUE && 14098 !is_jmp32 && tnum_is_const(src_reg->var_off)) { 14099 pred = is_branch_taken(dst_reg, 14100 src_reg->var_off.value, 14101 opcode, 14102 is_jmp32); 14103 } else if (dst_reg->type == SCALAR_VALUE && 14104 is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) { 14105 pred = is_branch_taken(src_reg, 14106 tnum_subreg(dst_reg->var_off).value, 14107 flip_opcode(opcode), 14108 is_jmp32); 14109 } else if (dst_reg->type == SCALAR_VALUE && 14110 !is_jmp32 && 
tnum_is_const(dst_reg->var_off)) { 14111 pred = is_branch_taken(src_reg, 14112 dst_reg->var_off.value, 14113 flip_opcode(opcode), 14114 is_jmp32); 14115 } else if (reg_is_pkt_pointer_any(dst_reg) && 14116 reg_is_pkt_pointer_any(src_reg) && 14117 !is_jmp32) { 14118 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode); 14119 } 14120 14121 if (pred >= 0) { 14122 /* If we get here with a dst_reg pointer type it is because 14123 * above is_branch_taken() special cased the 0 comparison. 14124 */ 14125 if (!__is_pointer_value(false, dst_reg)) 14126 err = mark_chain_precision(env, insn->dst_reg); 14127 if (BPF_SRC(insn->code) == BPF_X && !err && 14128 !__is_pointer_value(false, src_reg)) 14129 err = mark_chain_precision(env, insn->src_reg); 14130 if (err) 14131 return err; 14132 } 14133 14134 if (pred == 1) { 14135 /* Only follow the goto, ignore fall-through. If needed, push 14136 * the fall-through branch for simulation under speculative 14137 * execution. 14138 */ 14139 if (!env->bypass_spec_v1 && 14140 !sanitize_speculative_path(env, insn, *insn_idx + 1, 14141 *insn_idx)) 14142 return -EFAULT; 14143 if (env->log.level & BPF_LOG_LEVEL) 14144 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14145 *insn_idx += insn->off; 14146 return 0; 14147 } else if (pred == 0) { 14148 /* Only follow the fall-through branch, since that's where the 14149 * program will go. If needed, push the goto branch for 14150 * simulation under speculative execution. 14151 */ 14152 if (!env->bypass_spec_v1 && 14153 !sanitize_speculative_path(env, insn, 14154 *insn_idx + insn->off + 1, 14155 *insn_idx)) 14156 return -EFAULT; 14157 if (env->log.level & BPF_LOG_LEVEL) 14158 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14159 return 0; 14160 } 14161 14162 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 14163 false); 14164 if (!other_branch) 14165 return -EFAULT; 14166 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 14167 14168 /* detect if we are comparing against a constant value so we can adjust 14169 * our min/max values for our dst register. 14170 * this is only legit if both are scalars (or pointers to the same 14171 * object, I suppose, see the PTR_MAYBE_NULL related if block below), 14172 * because otherwise the different base pointers mean the offsets aren't 14173 * comparable. 
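 *
 * As an illustrative (not exhaustive) example: for 'if r1 > 10 goto +N' with
 * r1 being a SCALAR_VALUE, the taken branch may assume r1.umin_value >= 11
 * while the fall-through branch may assume r1.umax_value <= 10; the
 * reg_set_min_max()/reg_set_min_max_inv() calls below perform this kind of
 * narrowing for both branches.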
14174 */ 14175 if (BPF_SRC(insn->code) == BPF_X) { 14176 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 14177 14178 if (dst_reg->type == SCALAR_VALUE && 14179 src_reg->type == SCALAR_VALUE) { 14180 if (tnum_is_const(src_reg->var_off) || 14181 (is_jmp32 && 14182 tnum_is_const(tnum_subreg(src_reg->var_off)))) 14183 reg_set_min_max(&other_branch_regs[insn->dst_reg], 14184 dst_reg, 14185 src_reg->var_off.value, 14186 tnum_subreg(src_reg->var_off).value, 14187 opcode, is_jmp32); 14188 else if (tnum_is_const(dst_reg->var_off) || 14189 (is_jmp32 && 14190 tnum_is_const(tnum_subreg(dst_reg->var_off)))) 14191 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 14192 src_reg, 14193 dst_reg->var_off.value, 14194 tnum_subreg(dst_reg->var_off).value, 14195 opcode, is_jmp32); 14196 else if (!is_jmp32 && 14197 (opcode == BPF_JEQ || opcode == BPF_JNE)) 14198 /* Comparing for equality, we can combine knowledge */ 14199 reg_combine_min_max(&other_branch_regs[insn->src_reg], 14200 &other_branch_regs[insn->dst_reg], 14201 src_reg, dst_reg, opcode); 14202 if (src_reg->id && 14203 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { 14204 find_equal_scalars(this_branch, src_reg); 14205 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); 14206 } 14207 14208 } 14209 } else if (dst_reg->type == SCALAR_VALUE) { 14210 reg_set_min_max(&other_branch_regs[insn->dst_reg], 14211 dst_reg, insn->imm, (u32)insn->imm, 14212 opcode, is_jmp32); 14213 } 14214 14215 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && 14216 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { 14217 find_equal_scalars(this_branch, dst_reg); 14218 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); 14219 } 14220 14221 /* if one pointer register is compared to another pointer 14222 * register check if PTR_MAYBE_NULL could be lifted. 14223 * E.g. register A - maybe null 14224 * register B - not null 14225 * for JNE A, B, ... - A is not null in the false branch; 14226 * for JEQ A, B, ... - A is not null in the true branch. 14227 * 14228 * Since PTR_TO_BTF_ID points to a kernel struct that does 14229 * not need to be null checked by the BPF program, i.e., 14230 * could be null even without PTR_MAYBE_NULL marking, so 14231 * only propagate nullness when neither reg is that type. 14232 */ 14233 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && 14234 __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) && 14235 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && 14236 base_type(src_reg->type) != PTR_TO_BTF_ID && 14237 base_type(dst_reg->type) != PTR_TO_BTF_ID) { 14238 eq_branch_regs = NULL; 14239 switch (opcode) { 14240 case BPF_JEQ: 14241 eq_branch_regs = other_branch_regs; 14242 break; 14243 case BPF_JNE: 14244 eq_branch_regs = regs; 14245 break; 14246 default: 14247 /* do nothing */ 14248 break; 14249 } 14250 if (eq_branch_regs) { 14251 if (type_may_be_null(src_reg->type)) 14252 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); 14253 else 14254 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); 14255 } 14256 } 14257 14258 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 14259 * NOTE: these optimizations below are related with pointer comparison 14260 * which will never be JMP32. 
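 *
 * Illustrative example (assuming r0 was just set by bpf_map_lookup_elem() and
 * therefore has type PTR_TO_MAP_VALUE_OR_NULL): for 'if r0 == 0 goto +2' the
 * fall-through branch (r0 != 0) marks r0, and every register sharing its id,
 * as PTR_TO_MAP_VALUE, while the taken branch (r0 == 0) downgrades them to
 * SCALAR_VALUE since the pointer is known to be NULL there; see the
 * mark_ptr_or_null_regs() calls right below.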
14261 */ 14262 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 14263 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 14264 type_may_be_null(dst_reg->type)) { 14265 /* Mark all identical registers in each branch as either 14266 * safe or unknown depending on the R == 0 or R != 0 conditional. 14267 */ 14268 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 14269 opcode == BPF_JNE); 14270 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 14271 opcode == BPF_JEQ); 14272 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 14273 this_branch, other_branch) && 14274 is_pointer_value(env, insn->dst_reg)) { 14275 verbose(env, "R%d pointer comparison prohibited\n", 14276 insn->dst_reg); 14277 return -EACCES; 14278 } 14279 if (env->log.level & BPF_LOG_LEVEL) 14280 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14281 return 0; 14282 } 14283 14284 /* verify BPF_LD_IMM64 instruction */ 14285 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 14286 { 14287 struct bpf_insn_aux_data *aux = cur_aux(env); 14288 struct bpf_reg_state *regs = cur_regs(env); 14289 struct bpf_reg_state *dst_reg; 14290 struct bpf_map *map; 14291 int err; 14292 14293 if (BPF_SIZE(insn->code) != BPF_DW) { 14294 verbose(env, "invalid BPF_LD_IMM insn\n"); 14295 return -EINVAL; 14296 } 14297 if (insn->off != 0) { 14298 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 14299 return -EINVAL; 14300 } 14301 14302 err = check_reg_arg(env, insn->dst_reg, DST_OP); 14303 if (err) 14304 return err; 14305 14306 dst_reg = &regs[insn->dst_reg]; 14307 if (insn->src_reg == 0) { 14308 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 14309 14310 dst_reg->type = SCALAR_VALUE; 14311 __mark_reg_known(&regs[insn->dst_reg], imm); 14312 return 0; 14313 } 14314 14315 /* All special src_reg cases are listed below. From this point onwards 14316 * we either succeed and assign a corresponding dst_reg->type after 14317 * zeroing the offset, or fail and reject the program.
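 *
 * Rough summary of the cases handled below (the code is authoritative):
 * BPF_PSEUDO_BTF_ID yields PTR_TO_BTF_ID or PTR_TO_MEM depending on the
 * ksym's BTF type, BPF_PSEUDO_FUNC yields PTR_TO_FUNC, BPF_PSEUDO_MAP_FD and
 * BPF_PSEUDO_MAP_IDX yield CONST_PTR_TO_MAP, and BPF_PSEUDO_MAP_VALUE and
 * BPF_PSEUDO_MAP_IDX_VALUE yield PTR_TO_MAP_VALUE.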
14318 */ 14319 mark_reg_known_zero(env, regs, insn->dst_reg); 14320 14321 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 14322 dst_reg->type = aux->btf_var.reg_type; 14323 switch (base_type(dst_reg->type)) { 14324 case PTR_TO_MEM: 14325 dst_reg->mem_size = aux->btf_var.mem_size; 14326 break; 14327 case PTR_TO_BTF_ID: 14328 dst_reg->btf = aux->btf_var.btf; 14329 dst_reg->btf_id = aux->btf_var.btf_id; 14330 break; 14331 default: 14332 verbose(env, "bpf verifier is misconfigured\n"); 14333 return -EFAULT; 14334 } 14335 return 0; 14336 } 14337 14338 if (insn->src_reg == BPF_PSEUDO_FUNC) { 14339 struct bpf_prog_aux *aux = env->prog->aux; 14340 u32 subprogno = find_subprog(env, 14341 env->insn_idx + insn->imm + 1); 14342 14343 if (!aux->func_info) { 14344 verbose(env, "missing btf func_info\n"); 14345 return -EINVAL; 14346 } 14347 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { 14348 verbose(env, "callback function not static\n"); 14349 return -EINVAL; 14350 } 14351 14352 dst_reg->type = PTR_TO_FUNC; 14353 dst_reg->subprogno = subprogno; 14354 return 0; 14355 } 14356 14357 map = env->used_maps[aux->map_index]; 14358 dst_reg->map_ptr = map; 14359 14360 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || 14361 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { 14362 dst_reg->type = PTR_TO_MAP_VALUE; 14363 dst_reg->off = aux->map_off; 14364 WARN_ON_ONCE(map->max_entries != 1); 14365 /* We want reg->id to be same (0) as map_value is not distinct */ 14366 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || 14367 insn->src_reg == BPF_PSEUDO_MAP_IDX) { 14368 dst_reg->type = CONST_PTR_TO_MAP; 14369 } else { 14370 verbose(env, "bpf verifier is misconfigured\n"); 14371 return -EINVAL; 14372 } 14373 14374 return 0; 14375 } 14376 14377 static bool may_access_skb(enum bpf_prog_type type) 14378 { 14379 switch (type) { 14380 case BPF_PROG_TYPE_SOCKET_FILTER: 14381 case BPF_PROG_TYPE_SCHED_CLS: 14382 case BPF_PROG_TYPE_SCHED_ACT: 14383 return true; 14384 default: 14385 return false; 14386 } 14387 } 14388 14389 /* verify safety of LD_ABS|LD_IND instructions: 14390 * - they can only appear in the programs where ctx == skb 14391 * - since they are wrappers of function calls, they scratch R1-R5 registers, 14392 * preserve R6-R9, and store return value into R0 14393 * 14394 * Implicit input: 14395 * ctx == skb == R6 == CTX 14396 * 14397 * Explicit input: 14398 * SRC == any register 14399 * IMM == 32-bit immediate 14400 * 14401 * Output: 14402 * R0 - 8/16/32-bit skb data converted to cpu endianness 14403 */ 14404 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 14405 { 14406 struct bpf_reg_state *regs = cur_regs(env); 14407 static const int ctx_reg = BPF_REG_6; 14408 u8 mode = BPF_MODE(insn->code); 14409 int i, err; 14410 14411 if (!may_access_skb(resolve_prog_type(env->prog))) { 14412 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 14413 return -EINVAL; 14414 } 14415 14416 if (!env->ops->gen_ld_abs) { 14417 verbose(env, "bpf verifier is misconfigured\n"); 14418 return -EINVAL; 14419 } 14420 14421 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 14422 BPF_SIZE(insn->code) == BPF_DW || 14423 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 14424 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 14425 return -EINVAL; 14426 } 14427 14428 /* check whether implicit source operand (register R6) is readable */ 14429 err = check_reg_arg(env, ctx_reg, SRC_OP); 14430 if (err) 14431 return err; 14432 14433 /* Disallow usage of BPF_LD_[ABS|IND] with reference 
tracking, as 14434 * gen_ld_abs() may terminate the program at runtime, leading to 14435 * reference leak. 14436 */ 14437 err = check_reference_leak(env); 14438 if (err) { 14439 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 14440 return err; 14441 } 14442 14443 if (env->cur_state->active_lock.ptr) { 14444 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 14445 return -EINVAL; 14446 } 14447 14448 if (env->cur_state->active_rcu_lock) { 14449 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n"); 14450 return -EINVAL; 14451 } 14452 14453 if (regs[ctx_reg].type != PTR_TO_CTX) { 14454 verbose(env, 14455 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 14456 return -EINVAL; 14457 } 14458 14459 if (mode == BPF_IND) { 14460 /* check explicit source operand */ 14461 err = check_reg_arg(env, insn->src_reg, SRC_OP); 14462 if (err) 14463 return err; 14464 } 14465 14466 err = check_ptr_off_reg(env, ®s[ctx_reg], ctx_reg); 14467 if (err < 0) 14468 return err; 14469 14470 /* reset caller saved regs to unreadable */ 14471 for (i = 0; i < CALLER_SAVED_REGS; i++) { 14472 mark_reg_not_init(env, regs, caller_saved[i]); 14473 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 14474 } 14475 14476 /* mark destination R0 register as readable, since it contains 14477 * the value fetched from the packet. 14478 * Already marked as written above. 14479 */ 14480 mark_reg_unknown(env, regs, BPF_REG_0); 14481 /* ld_abs load up to 32-bit skb data. */ 14482 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 14483 return 0; 14484 } 14485 14486 static int check_return_code(struct bpf_verifier_env *env) 14487 { 14488 struct tnum enforce_attach_type_range = tnum_unknown; 14489 const struct bpf_prog *prog = env->prog; 14490 struct bpf_reg_state *reg; 14491 struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0); 14492 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 14493 int err; 14494 struct bpf_func_state *frame = env->cur_state->frame[0]; 14495 const bool is_subprog = frame->subprogno; 14496 14497 /* LSM and struct_ops func-ptr's return type could be "void" */ 14498 if (!is_subprog) { 14499 switch (prog_type) { 14500 case BPF_PROG_TYPE_LSM: 14501 if (prog->expected_attach_type == BPF_LSM_CGROUP) 14502 /* See below, can be 0 or 0-1 depending on hook. */ 14503 break; 14504 fallthrough; 14505 case BPF_PROG_TYPE_STRUCT_OPS: 14506 if (!prog->aux->attach_func_proto->type) 14507 return 0; 14508 break; 14509 default: 14510 break; 14511 } 14512 } 14513 14514 /* eBPF calling convention is such that R0 is used 14515 * to return the value from eBPF program. 
14516 * Make sure that it's readable at this time 14517 * of bpf_exit, which means that program wrote 14518 * something into it earlier 14519 */ 14520 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 14521 if (err) 14522 return err; 14523 14524 if (is_pointer_value(env, BPF_REG_0)) { 14525 verbose(env, "R0 leaks addr as return value\n"); 14526 return -EACCES; 14527 } 14528 14529 reg = cur_regs(env) + BPF_REG_0; 14530 14531 if (frame->in_async_callback_fn) { 14532 /* enforce return zero from async callbacks like timer */ 14533 if (reg->type != SCALAR_VALUE) { 14534 verbose(env, "In async callback the register R0 is not a known value (%s)\n", 14535 reg_type_str(env, reg->type)); 14536 return -EINVAL; 14537 } 14538 14539 if (!tnum_in(const_0, reg->var_off)) { 14540 verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0"); 14541 return -EINVAL; 14542 } 14543 return 0; 14544 } 14545 14546 if (is_subprog) { 14547 if (reg->type != SCALAR_VALUE) { 14548 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", 14549 reg_type_str(env, reg->type)); 14550 return -EINVAL; 14551 } 14552 return 0; 14553 } 14554 14555 switch (prog_type) { 14556 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 14557 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 14558 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 14559 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 14560 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 14561 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 14562 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) 14563 range = tnum_range(1, 1); 14564 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || 14565 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) 14566 range = tnum_range(0, 3); 14567 break; 14568 case BPF_PROG_TYPE_CGROUP_SKB: 14569 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 14570 range = tnum_range(0, 3); 14571 enforce_attach_type_range = tnum_range(2, 3); 14572 } 14573 break; 14574 case BPF_PROG_TYPE_CGROUP_SOCK: 14575 case BPF_PROG_TYPE_SOCK_OPS: 14576 case BPF_PROG_TYPE_CGROUP_DEVICE: 14577 case BPF_PROG_TYPE_CGROUP_SYSCTL: 14578 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 14579 break; 14580 case BPF_PROG_TYPE_RAW_TRACEPOINT: 14581 if (!env->prog->aux->attach_btf_id) 14582 return 0; 14583 range = tnum_const(0); 14584 break; 14585 case BPF_PROG_TYPE_TRACING: 14586 switch (env->prog->expected_attach_type) { 14587 case BPF_TRACE_FENTRY: 14588 case BPF_TRACE_FEXIT: 14589 range = tnum_const(0); 14590 break; 14591 case BPF_TRACE_RAW_TP: 14592 case BPF_MODIFY_RETURN: 14593 return 0; 14594 case BPF_TRACE_ITER: 14595 break; 14596 default: 14597 return -ENOTSUPP; 14598 } 14599 break; 14600 case BPF_PROG_TYPE_SK_LOOKUP: 14601 range = tnum_range(SK_DROP, SK_PASS); 14602 break; 14603 14604 case BPF_PROG_TYPE_LSM: 14605 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { 14606 /* Regular BPF_PROG_TYPE_LSM programs can return 14607 * any value. 14608 */ 14609 return 0; 14610 } 14611 if (!env->prog->aux->attach_func_proto->type) { 14612 /* Make sure programs that attach to void 14613 * hooks don't try to modify return value. 14614 */ 14615 range = tnum_range(1, 1); 14616 } 14617 break; 14618 14619 case BPF_PROG_TYPE_NETFILTER: 14620 range = tnum_range(NF_DROP, NF_ACCEPT); 14621 break; 14622 case BPF_PROG_TYPE_EXT: 14623 /* freplace program can return anything as its return value 14624 * depends on the to-be-replaced kernel func or bpf program. 
14625 */ 14626 default: 14627 return 0; 14628 } 14629 14630 if (reg->type != SCALAR_VALUE) { 14631 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 14632 reg_type_str(env, reg->type)); 14633 return -EINVAL; 14634 } 14635 14636 if (!tnum_in(range, reg->var_off)) { 14637 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); 14638 if (prog->expected_attach_type == BPF_LSM_CGROUP && 14639 prog_type == BPF_PROG_TYPE_LSM && 14640 !prog->aux->attach_func_proto->type) 14641 verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 14642 return -EINVAL; 14643 } 14644 14645 if (!tnum_is_unknown(enforce_attach_type_range) && 14646 tnum_in(enforce_attach_type_range, reg->var_off)) 14647 env->prog->enforce_expected_attach_type = 1; 14648 return 0; 14649 } 14650 14651 /* non-recursive DFS pseudo code 14652 * 1 procedure DFS-iterative(G,v): 14653 * 2 label v as discovered 14654 * 3 let S be a stack 14655 * 4 S.push(v) 14656 * 5 while S is not empty 14657 * 6 t <- S.peek() 14658 * 7 if t is what we're looking for: 14659 * 8 return t 14660 * 9 for all edges e in G.adjacentEdges(t) do 14661 * 10 if edge e is already labelled 14662 * 11 continue with the next edge 14663 * 12 w <- G.adjacentVertex(t,e) 14664 * 13 if vertex w is not discovered and not explored 14665 * 14 label e as tree-edge 14666 * 15 label w as discovered 14667 * 16 S.push(w) 14668 * 17 continue at 5 14669 * 18 else if vertex w is discovered 14670 * 19 label e as back-edge 14671 * 20 else 14672 * 21 // vertex w is explored 14673 * 22 label e as forward- or cross-edge 14674 * 23 label t as explored 14675 * 24 S.pop() 14676 * 14677 * convention: 14678 * 0x10 - discovered 14679 * 0x11 - discovered and fall-through edge labelled 14680 * 0x12 - discovered and fall-through and branch edges labelled 14681 * 0x20 - explored 14682 */ 14683 14684 enum { 14685 DISCOVERED = 0x10, 14686 EXPLORED = 0x20, 14687 FALLTHROUGH = 1, 14688 BRANCH = 2, 14689 }; 14690 14691 static u32 state_htab_size(struct bpf_verifier_env *env) 14692 { 14693 return env->prog->len; 14694 } 14695 14696 static struct bpf_verifier_state_list **explored_state( 14697 struct bpf_verifier_env *env, 14698 int idx) 14699 { 14700 struct bpf_verifier_state *cur = env->cur_state; 14701 struct bpf_func_state *state = cur->frame[cur->curframe]; 14702 14703 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 14704 } 14705 14706 static void mark_prune_point(struct bpf_verifier_env *env, int idx) 14707 { 14708 env->insn_aux_data[idx].prune_point = true; 14709 } 14710 14711 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx) 14712 { 14713 return env->insn_aux_data[insn_idx].prune_point; 14714 } 14715 14716 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx) 14717 { 14718 env->insn_aux_data[idx].force_checkpoint = true; 14719 } 14720 14721 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) 14722 { 14723 return env->insn_aux_data[insn_idx].force_checkpoint; 14724 } 14725 14726 14727 enum { 14728 DONE_EXPLORING = 0, 14729 KEEP_EXPLORING = 1, 14730 }; 14731 14732 /* t, w, e - match pseudo-code above: 14733 * t - index of current instruction 14734 * w - next instruction 14735 * e - edge 14736 */ 14737 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 14738 bool loop_ok) 14739 { 14740 int *insn_stack = env->cfg.insn_stack; 14741 int *insn_state = env->cfg.insn_state; 14742 14743 if (e == FALLTHROUGH && insn_state[t] 
>= (DISCOVERED | FALLTHROUGH)) 14744 return DONE_EXPLORING; 14745 14746 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 14747 return DONE_EXPLORING; 14748 14749 if (w < 0 || w >= env->prog->len) { 14750 verbose_linfo(env, t, "%d: ", t); 14751 verbose(env, "jump out of range from insn %d to %d\n", t, w); 14752 return -EINVAL; 14753 } 14754 14755 if (e == BRANCH) { 14756 /* mark branch target for state pruning */ 14757 mark_prune_point(env, w); 14758 mark_jmp_point(env, w); 14759 } 14760 14761 if (insn_state[w] == 0) { 14762 /* tree-edge */ 14763 insn_state[t] = DISCOVERED | e; 14764 insn_state[w] = DISCOVERED; 14765 if (env->cfg.cur_stack >= env->prog->len) 14766 return -E2BIG; 14767 insn_stack[env->cfg.cur_stack++] = w; 14768 return KEEP_EXPLORING; 14769 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 14770 if (loop_ok && env->bpf_capable) 14771 return DONE_EXPLORING; 14772 verbose_linfo(env, t, "%d: ", t); 14773 verbose_linfo(env, w, "%d: ", w); 14774 verbose(env, "back-edge from insn %d to %d\n", t, w); 14775 return -EINVAL; 14776 } else if (insn_state[w] == EXPLORED) { 14777 /* forward- or cross-edge */ 14778 insn_state[t] = DISCOVERED | e; 14779 } else { 14780 verbose(env, "insn state internal bug\n"); 14781 return -EFAULT; 14782 } 14783 return DONE_EXPLORING; 14784 } 14785 14786 static int visit_func_call_insn(int t, struct bpf_insn *insns, 14787 struct bpf_verifier_env *env, 14788 bool visit_callee) 14789 { 14790 int ret; 14791 14792 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 14793 if (ret) 14794 return ret; 14795 14796 mark_prune_point(env, t + 1); 14797 /* when we exit from subprog, we need to record non-linear history */ 14798 mark_jmp_point(env, t + 1); 14799 14800 if (visit_callee) { 14801 mark_prune_point(env, t); 14802 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, 14803 /* It's ok to allow recursion from CFG point of 14804 * view. __check_func_call() will do the actual 14805 * check. 14806 */ 14807 bpf_pseudo_func(insns + t)); 14808 } 14809 return ret; 14810 } 14811 14812 /* Visits the instruction at index t and returns one of the following: 14813 * < 0 - an error occurred 14814 * DONE_EXPLORING - the instruction was fully explored 14815 * KEEP_EXPLORING - there is still work to be done before it is fully explored 14816 */ 14817 static int visit_insn(int t, struct bpf_verifier_env *env) 14818 { 14819 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; 14820 int ret, off; 14821 14822 if (bpf_pseudo_func(insn)) 14823 return visit_func_call_insn(t, insns, env, true); 14824 14825 /* All non-branch instructions have a single fall-through edge. */ 14826 if (BPF_CLASS(insn->code) != BPF_JMP && 14827 BPF_CLASS(insn->code) != BPF_JMP32) 14828 return push_insn(t, t + 1, FALLTHROUGH, env, false); 14829 14830 switch (BPF_OP(insn->code)) { 14831 case BPF_EXIT: 14832 return DONE_EXPLORING; 14833 14834 case BPF_CALL: 14835 if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback) 14836 /* Mark this call insn as a prune point to trigger 14837 * is_state_visited() check before call itself is 14838 * processed by __check_func_call(). Otherwise new 14839 * async state will be pushed for further exploration. 
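 *
 * (Illustrative: for 'bpf_timer_set_callback(&t, cb)' the verifier pushes a
 * separate state to explore cb as an async callback; making the call insn a
 * prune point lets is_state_visited() match the current state against
 * previously seen ones first, instead of pushing an equivalent async state
 * on every visit.)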
14840 */ 14841 mark_prune_point(env, t); 14842 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 14843 struct bpf_kfunc_call_arg_meta meta; 14844 14845 ret = fetch_kfunc_meta(env, insn, &meta, NULL); 14846 if (ret == 0 && is_iter_next_kfunc(&meta)) { 14847 mark_prune_point(env, t); 14848 /* Checking and saving state checkpoints at iter_next() call 14849 * is crucial for fast convergence of open-coded iterator loop 14850 * logic, so we need to force it. If we don't do that, 14851 * is_state_visited() might skip saving a checkpoint, causing 14852 * unnecessarily long sequence of not checkpointed 14853 * instructions and jumps, leading to exhaustion of jump 14854 * history buffer, and potentially other undesired outcomes. 14855 * It is expected that with correct open-coded iterators 14856 * convergence will happen quickly, so we don't run a risk of 14857 * exhausting memory. 14858 */ 14859 mark_force_checkpoint(env, t); 14860 } 14861 } 14862 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); 14863 14864 case BPF_JA: 14865 if (BPF_SRC(insn->code) != BPF_K) 14866 return -EINVAL; 14867 14868 if (BPF_CLASS(insn->code) == BPF_JMP) 14869 off = insn->off; 14870 else 14871 off = insn->imm; 14872 14873 /* unconditional jump with single edge */ 14874 ret = push_insn(t, t + off + 1, FALLTHROUGH, env, 14875 true); 14876 if (ret) 14877 return ret; 14878 14879 mark_prune_point(env, t + off + 1); 14880 mark_jmp_point(env, t + off + 1); 14881 14882 return ret; 14883 14884 default: 14885 /* conditional jump with two edges */ 14886 mark_prune_point(env, t); 14887 14888 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 14889 if (ret) 14890 return ret; 14891 14892 return push_insn(t, t + insn->off + 1, BRANCH, env, true); 14893 } 14894 } 14895 14896 /* non-recursive depth-first-search to detect loops in BPF program 14897 * loop == back-edge in directed graph 14898 */ 14899 static int check_cfg(struct bpf_verifier_env *env) 14900 { 14901 int insn_cnt = env->prog->len; 14902 int *insn_stack, *insn_state; 14903 int ret = 0; 14904 int i; 14905 14906 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 14907 if (!insn_state) 14908 return -ENOMEM; 14909 14910 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 14911 if (!insn_stack) { 14912 kvfree(insn_state); 14913 return -ENOMEM; 14914 } 14915 14916 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 14917 insn_stack[0] = 0; /* 0 is the first instruction */ 14918 env->cfg.cur_stack = 1; 14919 14920 while (env->cfg.cur_stack > 0) { 14921 int t = insn_stack[env->cfg.cur_stack - 1]; 14922 14923 ret = visit_insn(t, env); 14924 switch (ret) { 14925 case DONE_EXPLORING: 14926 insn_state[t] = EXPLORED; 14927 env->cfg.cur_stack--; 14928 break; 14929 case KEEP_EXPLORING: 14930 break; 14931 default: 14932 if (ret > 0) { 14933 verbose(env, "visit_insn internal bug\n"); 14934 ret = -EFAULT; 14935 } 14936 goto err_free; 14937 } 14938 } 14939 14940 if (env->cfg.cur_stack < 0) { 14941 verbose(env, "pop stack internal bug\n"); 14942 ret = -EFAULT; 14943 goto err_free; 14944 } 14945 14946 for (i = 0; i < insn_cnt; i++) { 14947 if (insn_state[i] != EXPLORED) { 14948 verbose(env, "unreachable insn %d\n", i); 14949 ret = -EINVAL; 14950 goto err_free; 14951 } 14952 } 14953 ret = 0; /* cfg looks good */ 14954 14955 err_free: 14956 kvfree(insn_state); 14957 kvfree(insn_stack); 14958 env->cfg.insn_state = env->cfg.insn_stack = NULL; 14959 return ret; 14960 } 14961 14962 static int 
check_abnormal_return(struct bpf_verifier_env *env) 14963 { 14964 int i; 14965 14966 for (i = 1; i < env->subprog_cnt; i++) { 14967 if (env->subprog_info[i].has_ld_abs) { 14968 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 14969 return -EINVAL; 14970 } 14971 if (env->subprog_info[i].has_tail_call) { 14972 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 14973 return -EINVAL; 14974 } 14975 } 14976 return 0; 14977 } 14978 14979 /* The minimum supported BTF func info size */ 14980 #define MIN_BPF_FUNCINFO_SIZE 8 14981 #define MAX_FUNCINFO_REC_SIZE 252 14982 14983 static int check_btf_func(struct bpf_verifier_env *env, 14984 const union bpf_attr *attr, 14985 bpfptr_t uattr) 14986 { 14987 const struct btf_type *type, *func_proto, *ret_type; 14988 u32 i, nfuncs, urec_size, min_size; 14989 u32 krec_size = sizeof(struct bpf_func_info); 14990 struct bpf_func_info *krecord; 14991 struct bpf_func_info_aux *info_aux = NULL; 14992 struct bpf_prog *prog; 14993 const struct btf *btf; 14994 bpfptr_t urecord; 14995 u32 prev_offset = 0; 14996 bool scalar_return; 14997 int ret = -ENOMEM; 14998 14999 nfuncs = attr->func_info_cnt; 15000 if (!nfuncs) { 15001 if (check_abnormal_return(env)) 15002 return -EINVAL; 15003 return 0; 15004 } 15005 15006 if (nfuncs != env->subprog_cnt) { 15007 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 15008 return -EINVAL; 15009 } 15010 15011 urec_size = attr->func_info_rec_size; 15012 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 15013 urec_size > MAX_FUNCINFO_REC_SIZE || 15014 urec_size % sizeof(u32)) { 15015 verbose(env, "invalid func info rec size %u\n", urec_size); 15016 return -EINVAL; 15017 } 15018 15019 prog = env->prog; 15020 btf = prog->aux->btf; 15021 15022 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 15023 min_size = min_t(u32, krec_size, urec_size); 15024 15025 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 15026 if (!krecord) 15027 return -ENOMEM; 15028 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 15029 if (!info_aux) 15030 goto err_free; 15031 15032 for (i = 0; i < nfuncs; i++) { 15033 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 15034 if (ret) { 15035 if (ret == -E2BIG) { 15036 verbose(env, "nonzero tailing record in func info"); 15037 /* set the size kernel expects so loader can zero 15038 * out the rest of the record. 
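 *
 * (E.g., hypothetically, a loader built against a newer UAPI with a larger
 * bpf_func_info record can read back func_info_rec_size, zero out the tail
 * of each record beyond that size and retry the load.)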
15039 */ 15040 if (copy_to_bpfptr_offset(uattr, 15041 offsetof(union bpf_attr, func_info_rec_size), 15042 &min_size, sizeof(min_size))) 15043 ret = -EFAULT; 15044 } 15045 goto err_free; 15046 } 15047 15048 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { 15049 ret = -EFAULT; 15050 goto err_free; 15051 } 15052 15053 /* check insn_off */ 15054 ret = -EINVAL; 15055 if (i == 0) { 15056 if (krecord[i].insn_off) { 15057 verbose(env, 15058 "nonzero insn_off %u for the first func info record", 15059 krecord[i].insn_off); 15060 goto err_free; 15061 } 15062 } else if (krecord[i].insn_off <= prev_offset) { 15063 verbose(env, 15064 "same or smaller insn offset (%u) than previous func info record (%u)", 15065 krecord[i].insn_off, prev_offset); 15066 goto err_free; 15067 } 15068 15069 if (env->subprog_info[i].start != krecord[i].insn_off) { 15070 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 15071 goto err_free; 15072 } 15073 15074 /* check type_id */ 15075 type = btf_type_by_id(btf, krecord[i].type_id); 15076 if (!type || !btf_type_is_func(type)) { 15077 verbose(env, "invalid type id %d in func info", 15078 krecord[i].type_id); 15079 goto err_free; 15080 } 15081 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 15082 15083 func_proto = btf_type_by_id(btf, type->type); 15084 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) 15085 /* btf_func_check() already verified it during BTF load */ 15086 goto err_free; 15087 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); 15088 scalar_return = 15089 btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type); 15090 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { 15091 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); 15092 goto err_free; 15093 } 15094 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { 15095 verbose(env, "tail_call is only allowed in functions that return 'int'.\n"); 15096 goto err_free; 15097 } 15098 15099 prev_offset = krecord[i].insn_off; 15100 bpfptr_add(&urecord, urec_size); 15101 } 15102 15103 prog->aux->func_info = krecord; 15104 prog->aux->func_info_cnt = nfuncs; 15105 prog->aux->func_info_aux = info_aux; 15106 return 0; 15107 15108 err_free: 15109 kvfree(krecord); 15110 kfree(info_aux); 15111 return ret; 15112 } 15113 15114 static void adjust_btf_func(struct bpf_verifier_env *env) 15115 { 15116 struct bpf_prog_aux *aux = env->prog->aux; 15117 int i; 15118 15119 if (!aux->func_info) 15120 return; 15121 15122 for (i = 0; i < env->subprog_cnt; i++) 15123 aux->func_info[i].insn_off = env->subprog_info[i].start; 15124 } 15125 15126 #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col) 15127 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 15128 15129 static int check_btf_line(struct bpf_verifier_env *env, 15130 const union bpf_attr *attr, 15131 bpfptr_t uattr) 15132 { 15133 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 15134 struct bpf_subprog_info *sub; 15135 struct bpf_line_info *linfo; 15136 struct bpf_prog *prog; 15137 const struct btf *btf; 15138 bpfptr_t ulinfo; 15139 int err; 15140 15141 nr_linfo = attr->line_info_cnt; 15142 if (!nr_linfo) 15143 return 0; 15144 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) 15145 return -EINVAL; 15146 15147 rec_size = attr->line_info_rec_size; 15148 if (rec_size < MIN_BPF_LINEINFO_SIZE || 15149 rec_size > MAX_LINEINFO_REC_SIZE || 15150 rec_size & (sizeof(u32) - 1)) 15151 return -EINVAL; 15152 15153 /* Need to zero it in 
case the userspace may 15154 * pass in a smaller bpf_line_info object. 15155 */ 15156 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 15157 GFP_KERNEL | __GFP_NOWARN); 15158 if (!linfo) 15159 return -ENOMEM; 15160 15161 prog = env->prog; 15162 btf = prog->aux->btf; 15163 15164 s = 0; 15165 sub = env->subprog_info; 15166 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); 15167 expected_size = sizeof(struct bpf_line_info); 15168 ncopy = min_t(u32, expected_size, rec_size); 15169 for (i = 0; i < nr_linfo; i++) { 15170 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 15171 if (err) { 15172 if (err == -E2BIG) { 15173 verbose(env, "nonzero tailing record in line_info"); 15174 if (copy_to_bpfptr_offset(uattr, 15175 offsetof(union bpf_attr, line_info_rec_size), 15176 &expected_size, sizeof(expected_size))) 15177 err = -EFAULT; 15178 } 15179 goto err_free; 15180 } 15181 15182 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { 15183 err = -EFAULT; 15184 goto err_free; 15185 } 15186 15187 /* 15188 * Check insn_off to ensure 15189 * 1) strictly increasing AND 15190 * 2) bounded by prog->len 15191 * 15192 * The linfo[0].insn_off == 0 check logically falls into 15193 * the later "missing bpf_line_info for func..." case 15194 * because the first linfo[0].insn_off must be the 15195 * first sub also and the first sub must have 15196 * subprog_info[0].start == 0. 15197 */ 15198 if ((i && linfo[i].insn_off <= prev_offset) || 15199 linfo[i].insn_off >= prog->len) { 15200 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 15201 i, linfo[i].insn_off, prev_offset, 15202 prog->len); 15203 err = -EINVAL; 15204 goto err_free; 15205 } 15206 15207 if (!prog->insnsi[linfo[i].insn_off].code) { 15208 verbose(env, 15209 "Invalid insn code at line_info[%u].insn_off\n", 15210 i); 15211 err = -EINVAL; 15212 goto err_free; 15213 } 15214 15215 if (!btf_name_by_offset(btf, linfo[i].line_off) || 15216 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 15217 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 15218 err = -EINVAL; 15219 goto err_free; 15220 } 15221 15222 if (s != env->subprog_cnt) { 15223 if (linfo[i].insn_off == sub[s].start) { 15224 sub[s].linfo_idx = i; 15225 s++; 15226 } else if (sub[s].start < linfo[i].insn_off) { 15227 verbose(env, "missing bpf_line_info for func#%u\n", s); 15228 err = -EINVAL; 15229 goto err_free; 15230 } 15231 } 15232 15233 prev_offset = linfo[i].insn_off; 15234 bpfptr_add(&ulinfo, rec_size); 15235 } 15236 15237 if (s != env->subprog_cnt) { 15238 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 15239 env->subprog_cnt - s, s); 15240 err = -EINVAL; 15241 goto err_free; 15242 } 15243 15244 prog->aux->linfo = linfo; 15245 prog->aux->nr_linfo = nr_linfo; 15246 15247 return 0; 15248 15249 err_free: 15250 kvfree(linfo); 15251 return err; 15252 } 15253 15254 #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo) 15255 #define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE 15256 15257 static int check_core_relo(struct bpf_verifier_env *env, 15258 const union bpf_attr *attr, 15259 bpfptr_t uattr) 15260 { 15261 u32 i, nr_core_relo, ncopy, expected_size, rec_size; 15262 struct bpf_core_relo core_relo = {}; 15263 struct bpf_prog *prog = env->prog; 15264 const struct btf *btf = prog->aux->btf; 15265 struct bpf_core_ctx ctx = { 15266 .log = &env->log, 15267 .btf = btf, 15268 }; 15269 bpfptr_t u_core_relo; 15270 int err; 15271 15272 nr_core_relo = attr->core_relo_cnt; 15273 if (!nr_core_relo) 15274 return 
0; 15275 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo)) 15276 return -EINVAL; 15277 15278 rec_size = attr->core_relo_rec_size; 15279 if (rec_size < MIN_CORE_RELO_SIZE || 15280 rec_size > MAX_CORE_RELO_SIZE || 15281 rec_size % sizeof(u32)) 15282 return -EINVAL; 15283 15284 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); 15285 expected_size = sizeof(struct bpf_core_relo); 15286 ncopy = min_t(u32, expected_size, rec_size); 15287 15288 /* Unlike func_info and line_info, copy and apply each CO-RE 15289 * relocation record one at a time. 15290 */ 15291 for (i = 0; i < nr_core_relo; i++) { 15292 /* future proofing when sizeof(bpf_core_relo) changes */ 15293 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size); 15294 if (err) { 15295 if (err == -E2BIG) { 15296 verbose(env, "nonzero tailing record in core_relo"); 15297 if (copy_to_bpfptr_offset(uattr, 15298 offsetof(union bpf_attr, core_relo_rec_size), 15299 &expected_size, sizeof(expected_size))) 15300 err = -EFAULT; 15301 } 15302 break; 15303 } 15304 15305 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) { 15306 err = -EFAULT; 15307 break; 15308 } 15309 15310 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { 15311 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", 15312 i, core_relo.insn_off, prog->len); 15313 err = -EINVAL; 15314 break; 15315 } 15316 15317 err = bpf_core_apply(&ctx, &core_relo, i, 15318 &prog->insnsi[core_relo.insn_off / 8]); 15319 if (err) 15320 break; 15321 bpfptr_add(&u_core_relo, rec_size); 15322 } 15323 return err; 15324 } 15325 15326 static int check_btf_info(struct bpf_verifier_env *env, 15327 const union bpf_attr *attr, 15328 bpfptr_t uattr) 15329 { 15330 struct btf *btf; 15331 int err; 15332 15333 if (!attr->func_info_cnt && !attr->line_info_cnt) { 15334 if (check_abnormal_return(env)) 15335 return -EINVAL; 15336 return 0; 15337 } 15338 15339 btf = btf_get_by_fd(attr->prog_btf_fd); 15340 if (IS_ERR(btf)) 15341 return PTR_ERR(btf); 15342 if (btf_is_kernel(btf)) { 15343 btf_put(btf); 15344 return -EACCES; 15345 } 15346 env->prog->aux->btf = btf; 15347 15348 err = check_btf_func(env, attr, uattr); 15349 if (err) 15350 return err; 15351 15352 err = check_btf_line(env, attr, uattr); 15353 if (err) 15354 return err; 15355 15356 err = check_core_relo(env, attr, uattr); 15357 if (err) 15358 return err; 15359 15360 return 0; 15361 } 15362 15363 /* check %cur's range satisfies %old's */ 15364 static bool range_within(struct bpf_reg_state *old, 15365 struct bpf_reg_state *cur) 15366 { 15367 return old->umin_value <= cur->umin_value && 15368 old->umax_value >= cur->umax_value && 15369 old->smin_value <= cur->smin_value && 15370 old->smax_value >= cur->smax_value && 15371 old->u32_min_value <= cur->u32_min_value && 15372 old->u32_max_value >= cur->u32_max_value && 15373 old->s32_min_value <= cur->s32_min_value && 15374 old->s32_max_value >= cur->s32_max_value; 15375 } 15376 15377 /* If in the old state two registers had the same id, then they need to have 15378 * the same id in the new state as well. But that id could be different from 15379 * the old state, so we need to track the mapping from old to new ids. 15380 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 15381 * regs with old id 5 must also have new id 9 for the new state to be safe. But 15382 * regs with a different old id could still have new id 9, we don't care about 15383 * that. 15384 * So we look through our idmap to see if this old id has been seen before. 
If 15385 * so, we require the new id to match; otherwise, we add the id pair to the map. 15386 */ 15387 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) 15388 { 15389 struct bpf_id_pair *map = idmap->map; 15390 unsigned int i; 15391 15392 /* either both IDs should be set or both should be zero */ 15393 if (!!old_id != !!cur_id) 15394 return false; 15395 15396 if (old_id == 0) /* cur_id == 0 as well */ 15397 return true; 15398 15399 for (i = 0; i < BPF_ID_MAP_SIZE; i++) { 15400 if (!map[i].old) { 15401 /* Reached an empty slot; haven't seen this id before */ 15402 map[i].old = old_id; 15403 map[i].cur = cur_id; 15404 return true; 15405 } 15406 if (map[i].old == old_id) 15407 return map[i].cur == cur_id; 15408 if (map[i].cur == cur_id) 15409 return false; 15410 } 15411 /* We ran out of idmap slots, which should be impossible */ 15412 WARN_ON_ONCE(1); 15413 return false; 15414 } 15415 15416 /* Similar to check_ids(), but allocate a unique temporary ID 15417 * for 'old_id' or 'cur_id' of zero. 15418 * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid. 15419 */ 15420 static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) 15421 { 15422 old_id = old_id ? old_id : ++idmap->tmp_id_gen; 15423 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; 15424 15425 return check_ids(old_id, cur_id, idmap); 15426 } 15427 15428 static void clean_func_state(struct bpf_verifier_env *env, 15429 struct bpf_func_state *st) 15430 { 15431 enum bpf_reg_liveness live; 15432 int i, j; 15433 15434 for (i = 0; i < BPF_REG_FP; i++) { 15435 live = st->regs[i].live; 15436 /* liveness must not touch this register anymore */ 15437 st->regs[i].live |= REG_LIVE_DONE; 15438 if (!(live & REG_LIVE_READ)) 15439 /* since the register is unused, clear its state 15440 * to make further comparison simpler 15441 */ 15442 __mark_reg_not_init(env, &st->regs[i]); 15443 } 15444 15445 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 15446 live = st->stack[i].spilled_ptr.live; 15447 /* liveness must not touch this stack slot anymore */ 15448 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 15449 if (!(live & REG_LIVE_READ)) { 15450 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 15451 for (j = 0; j < BPF_REG_SIZE; j++) 15452 st->stack[i].slot_type[j] = STACK_INVALID; 15453 } 15454 } 15455 } 15456 15457 static void clean_verifier_state(struct bpf_verifier_env *env, 15458 struct bpf_verifier_state *st) 15459 { 15460 int i; 15461 15462 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 15463 /* all regs in this state in all frames were already marked */ 15464 return; 15465 15466 for (i = 0; i <= st->curframe; i++) 15467 clean_func_state(env, st->frame[i]); 15468 } 15469 15470 /* the parentage chains form a tree. 15471 * the verifier states are added to state lists at given insn and 15472 * pushed into state stack for future exploration. 15473 * when the verifier reaches bpf_exit insn some of the verifer states 15474 * stored in the state lists have their final liveness state already, 15475 * but a lot of states will get revised from liveness point of view when 15476 * the verifier explores other branches. 15477 * Example: 15478 * 1: r0 = 1 15479 * 2: if r1 == 100 goto pc+1 15480 * 3: r0 = 2 15481 * 4: exit 15482 * when the verifier reaches exit insn the register r0 in the state list of 15483 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 15484 * of insn 2 and goes exploring further. 
At the insn 4 it will walk the 15485 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 15486 * 15487 * Since the verifier pushes the branch states as it sees them while exploring 15488 * the program the condition of walking the branch instruction for the second 15489 * time means that all states below this branch were already explored and 15490 * their final liveness marks are already propagated. 15491 * Hence when the verifier completes the search of state list in is_state_visited() 15492 * we can call this clean_live_states() function to mark all liveness states 15493 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 15494 * will not be used. 15495 * This function also clears the registers and stack for states that !READ 15496 * to simplify state merging. 15497 * 15498 * Important note here that walking the same branch instruction in the callee 15499 * doesn't meant that the states are DONE. The verifier has to compare 15500 * the callsites 15501 */ 15502 static void clean_live_states(struct bpf_verifier_env *env, int insn, 15503 struct bpf_verifier_state *cur) 15504 { 15505 struct bpf_verifier_state_list *sl; 15506 int i; 15507 15508 sl = *explored_state(env, insn); 15509 while (sl) { 15510 if (sl->state.branches) 15511 goto next; 15512 if (sl->state.insn_idx != insn || 15513 sl->state.curframe != cur->curframe) 15514 goto next; 15515 for (i = 0; i <= cur->curframe; i++) 15516 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 15517 goto next; 15518 clean_verifier_state(env, &sl->state); 15519 next: 15520 sl = sl->next; 15521 } 15522 } 15523 15524 static bool regs_exact(const struct bpf_reg_state *rold, 15525 const struct bpf_reg_state *rcur, 15526 struct bpf_idmap *idmap) 15527 { 15528 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 15529 check_ids(rold->id, rcur->id, idmap) && 15530 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); 15531 } 15532 15533 /* Returns true if (rold safe implies rcur safe) */ 15534 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, 15535 struct bpf_reg_state *rcur, struct bpf_idmap *idmap) 15536 { 15537 if (!(rold->live & REG_LIVE_READ)) 15538 /* explored state didn't use this */ 15539 return true; 15540 if (rold->type == NOT_INIT) 15541 /* explored state can't have used this */ 15542 return true; 15543 if (rcur->type == NOT_INIT) 15544 return false; 15545 15546 /* Enforce that register types have to match exactly, including their 15547 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general 15548 * rule. 15549 * 15550 * One can make a point that using a pointer register as unbounded 15551 * SCALAR would be technically acceptable, but this could lead to 15552 * pointer leaks because scalars are allowed to leak while pointers 15553 * are not. We could make this safe in special cases if root is 15554 * calling us, but it's probably not worth the hassle. 15555 * 15556 * Also, register types that are *not* MAYBE_NULL could technically be 15557 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE 15558 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point 15559 * to the same map). 15560 * However, if the old MAYBE_NULL register then got NULL checked, 15561 * doing so could have affected others with the same id, and we can't 15562 * check for that because we lost the id when we converted to 15563 * a non-MAYBE_NULL variant. 
15564 * So, as a general rule we don't allow mixing MAYBE_NULL and 15565 * non-MAYBE_NULL registers as well. 15566 */ 15567 if (rold->type != rcur->type) 15568 return false; 15569 15570 switch (base_type(rold->type)) { 15571 case SCALAR_VALUE: 15572 if (env->explore_alu_limits) { 15573 /* explore_alu_limits disables tnum_in() and range_within() 15574 * logic and requires everything to be strict 15575 */ 15576 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 15577 check_scalar_ids(rold->id, rcur->id, idmap); 15578 } 15579 if (!rold->precise) 15580 return true; 15581 /* Why check_ids() for scalar registers? 15582 * 15583 * Consider the following BPF code: 15584 * 1: r6 = ... unbound scalar, ID=a ... 15585 * 2: r7 = ... unbound scalar, ID=b ... 15586 * 3: if (r6 > r7) goto +1 15587 * 4: r6 = r7 15588 * 5: if (r6 > X) goto ... 15589 * 6: ... memory operation using r7 ... 15590 * 15591 * First verification path is [1-6]: 15592 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; 15593 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark 15594 * r7 <= X, because r6 and r7 share same id. 15595 * Next verification path is [1-4, 6]. 15596 * 15597 * Instruction (6) would be reached in two states: 15598 * I. r6{.id=b}, r7{.id=b} via path 1-6; 15599 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. 15600 * 15601 * Use check_ids() to distinguish these states. 15602 * --- 15603 * Also verify that new value satisfies old value range knowledge. 15604 */ 15605 return range_within(rold, rcur) && 15606 tnum_in(rold->var_off, rcur->var_off) && 15607 check_scalar_ids(rold->id, rcur->id, idmap); 15608 case PTR_TO_MAP_KEY: 15609 case PTR_TO_MAP_VALUE: 15610 case PTR_TO_MEM: 15611 case PTR_TO_BUF: 15612 case PTR_TO_TP_BUFFER: 15613 /* If the new min/max/var_off satisfy the old ones and 15614 * everything else matches, we are OK. 15615 */ 15616 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 && 15617 range_within(rold, rcur) && 15618 tnum_in(rold->var_off, rcur->var_off) && 15619 check_ids(rold->id, rcur->id, idmap) && 15620 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); 15621 case PTR_TO_PACKET_META: 15622 case PTR_TO_PACKET: 15623 /* We must have at least as much range as the old ptr 15624 * did, so that any accesses which were safe before are 15625 * still safe. This is true even if old range < old off, 15626 * since someone could have accessed through (ptr - k), or 15627 * even done ptr -= k in a register, to get a safe access. 15628 */ 15629 if (rold->range > rcur->range) 15630 return false; 15631 /* If the offsets don't match, we can't trust our alignment; 15632 * nor can we be sure that we won't fall out of range. 
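 *
 * (Illustrative: an access that was proven aligned for a packet pointer
 * with off=2 need not be aligned for one with off=0, and vice versa, so
 * states with differing fixed offsets are conservatively treated as not
 * equivalent.)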
15633 */ 15634 if (rold->off != rcur->off) 15635 return false; 15636 /* id relations must be preserved */ 15637 if (!check_ids(rold->id, rcur->id, idmap)) 15638 return false; 15639 /* new val must satisfy old val knowledge */ 15640 return range_within(rold, rcur) && 15641 tnum_in(rold->var_off, rcur->var_off); 15642 case PTR_TO_STACK: 15643 /* two stack pointers are equal only if they're pointing to 15644 * the same stack frame, since fp-8 in foo != fp-8 in bar 15645 */ 15646 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; 15647 default: 15648 return regs_exact(rold, rcur, idmap); 15649 } 15650 } 15651 15652 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, 15653 struct bpf_func_state *cur, struct bpf_idmap *idmap) 15654 { 15655 int i, spi; 15656 15657 /* walk slots of the explored stack and ignore any additional 15658 * slots in the current stack, since explored(safe) state 15659 * didn't use them 15660 */ 15661 for (i = 0; i < old->allocated_stack; i++) { 15662 struct bpf_reg_state *old_reg, *cur_reg; 15663 15664 spi = i / BPF_REG_SIZE; 15665 15666 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 15667 i += BPF_REG_SIZE - 1; 15668 /* explored state didn't use this */ 15669 continue; 15670 } 15671 15672 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 15673 continue; 15674 15675 if (env->allow_uninit_stack && 15676 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) 15677 continue; 15678 15679 /* explored stack has more populated slots than current stack 15680 * and these slots were used 15681 */ 15682 if (i >= cur->allocated_stack) 15683 return false; 15684 15685 /* if old state was safe with misc data in the stack 15686 * it will be safe with zero-initialized stack. 15687 * The opposite is not true 15688 */ 15689 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 15690 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 15691 continue; 15692 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 15693 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 15694 /* Ex: old explored (safe) state has STACK_SPILL in 15695 * this stack slot, but current has STACK_MISC -> 15696 * this verifier states are not equivalent, 15697 * return false to continue verification of this path 15698 */ 15699 return false; 15700 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) 15701 continue; 15702 /* Both old and cur are having same slot_type */ 15703 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { 15704 case STACK_SPILL: 15705 /* when explored and current stack slot are both storing 15706 * spilled registers, check that stored pointers types 15707 * are the same as well. 15708 * Ex: explored safe path could have stored 15709 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 15710 * but current path has stored: 15711 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 15712 * such verifier states are not equivalent. 
15713 * return false to continue verification of this path 15714 */ 15715 if (!regsafe(env, &old->stack[spi].spilled_ptr, 15716 &cur->stack[spi].spilled_ptr, idmap)) 15717 return false; 15718 break; 15719 case STACK_DYNPTR: 15720 old_reg = &old->stack[spi].spilled_ptr; 15721 cur_reg = &cur->stack[spi].spilled_ptr; 15722 if (old_reg->dynptr.type != cur_reg->dynptr.type || 15723 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || 15724 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 15725 return false; 15726 break; 15727 case STACK_ITER: 15728 old_reg = &old->stack[spi].spilled_ptr; 15729 cur_reg = &cur->stack[spi].spilled_ptr; 15730 /* iter.depth is not compared between states as it 15731 * doesn't matter for correctness and would otherwise 15732 * prevent convergence; we maintain it only to prevent 15733 * infinite loop check triggering, see 15734 * iter_active_depths_differ() 15735 */ 15736 if (old_reg->iter.btf != cur_reg->iter.btf || 15737 old_reg->iter.btf_id != cur_reg->iter.btf_id || 15738 old_reg->iter.state != cur_reg->iter.state || 15739 /* ignore {old_reg,cur_reg}->iter.depth, see above */ 15740 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 15741 return false; 15742 break; 15743 case STACK_MISC: 15744 case STACK_ZERO: 15745 case STACK_INVALID: 15746 continue; 15747 /* Ensure that new unhandled slot types return false by default */ 15748 default: 15749 return false; 15750 } 15751 } 15752 return true; 15753 } 15754 15755 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur, 15756 struct bpf_idmap *idmap) 15757 { 15758 int i; 15759 15760 if (old->acquired_refs != cur->acquired_refs) 15761 return false; 15762 15763 for (i = 0; i < old->acquired_refs; i++) { 15764 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap)) 15765 return false; 15766 } 15767 15768 return true; 15769 } 15770 15771 /* compare two verifier states 15772 * 15773 * all states stored in state_list are known to be valid, since 15774 * verifier reached 'bpf_exit' instruction through them 15775 * 15776 * this function is called when verifier exploring different branches of 15777 * execution popped from the state stack. If it sees an old state that has 15778 * more strict register state and more strict stack state then this execution 15779 * branch doesn't need to be explored further, since verifier already 15780 * concluded that more strict state leads to valid finish. 15781 * 15782 * Therefore two states are equivalent if register state is more conservative 15783 * and explored stack state is more conservative than the current one. 15784 * Example: 15785 * explored current 15786 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 15787 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 15788 * 15789 * In other words if current stack state (one being explored) has more 15790 * valid slots than old one that already passed validation, it means 15791 * the verifier can stop exploring and conclude that current state is valid too 15792 * 15793 * Similarly with registers. 
If explored state has register type as invalid 15794 * whereas register type in current state is meaningful, it means that 15795 * the current state will reach 'bpf_exit' instruction safely 15796 */ 15797 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, 15798 struct bpf_func_state *cur) 15799 { 15800 int i; 15801 15802 for (i = 0; i < MAX_BPF_REG; i++) 15803 if (!regsafe(env, &old->regs[i], &cur->regs[i], 15804 &env->idmap_scratch)) 15805 return false; 15806 15807 if (!stacksafe(env, old, cur, &env->idmap_scratch)) 15808 return false; 15809 15810 if (!refsafe(old, cur, &env->idmap_scratch)) 15811 return false; 15812 15813 return true; 15814 } 15815 15816 static bool states_equal(struct bpf_verifier_env *env, 15817 struct bpf_verifier_state *old, 15818 struct bpf_verifier_state *cur) 15819 { 15820 int i; 15821 15822 if (old->curframe != cur->curframe) 15823 return false; 15824 15825 env->idmap_scratch.tmp_id_gen = env->id_gen; 15826 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); 15827 15828 /* Verification state from speculative execution simulation 15829 * must never prune a non-speculative execution one. 15830 */ 15831 if (old->speculative && !cur->speculative) 15832 return false; 15833 15834 if (old->active_lock.ptr != cur->active_lock.ptr) 15835 return false; 15836 15837 /* Old and cur active_lock's have to be either both present 15838 * or both absent. 15839 */ 15840 if (!!old->active_lock.id != !!cur->active_lock.id) 15841 return false; 15842 15843 if (old->active_lock.id && 15844 !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) 15845 return false; 15846 15847 if (old->active_rcu_lock != cur->active_rcu_lock) 15848 return false; 15849 15850 /* for states to be equal callsites have to be the same 15851 * and all frame states need to be equivalent 15852 */ 15853 for (i = 0; i <= old->curframe; i++) { 15854 if (old->frame[i]->callsite != cur->frame[i]->callsite) 15855 return false; 15856 if (!func_states_equal(env, old->frame[i], cur->frame[i])) 15857 return false; 15858 } 15859 return true; 15860 } 15861 15862 /* Return 0 if no propagation happened. Return negative error code if error 15863 * happened. Otherwise, return the propagated bit. 15864 */ 15865 static int propagate_liveness_reg(struct bpf_verifier_env *env, 15866 struct bpf_reg_state *reg, 15867 struct bpf_reg_state *parent_reg) 15868 { 15869 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 15870 u8 flag = reg->live & REG_LIVE_READ; 15871 int err; 15872 15873 /* When comes here, read flags of PARENT_REG or REG could be any of 15874 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 15875 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 15876 */ 15877 if (parent_flag == REG_LIVE_READ64 || 15878 /* Or if there is no read flag from REG. */ 15879 !flag || 15880 /* Or if the read flag from REG is the same as PARENT_REG. */ 15881 parent_flag == flag) 15882 return 0; 15883 15884 err = mark_reg_read(env, reg, parent_reg, flag); 15885 if (err) 15886 return err; 15887 15888 return flag; 15889 } 15890 15891 /* A write screens off any subsequent reads; but write marks come from the 15892 * straight-line code between a state and its parent. When we arrive at an 15893 * equivalent state (jump target or such) we didn't arrive by the straight-line 15894 * code, so read marks in the state must propagate to the parent regardless 15895 * of the state's write marks. 
That's what 'parent == state->parent' comparison 15896 * in mark_reg_read() is for. 15897 */ 15898 static int propagate_liveness(struct bpf_verifier_env *env, 15899 const struct bpf_verifier_state *vstate, 15900 struct bpf_verifier_state *vparent) 15901 { 15902 struct bpf_reg_state *state_reg, *parent_reg; 15903 struct bpf_func_state *state, *parent; 15904 int i, frame, err = 0; 15905 15906 if (vparent->curframe != vstate->curframe) { 15907 WARN(1, "propagate_live: parent frame %d current frame %d\n", 15908 vparent->curframe, vstate->curframe); 15909 return -EFAULT; 15910 } 15911 /* Propagate read liveness of registers... */ 15912 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 15913 for (frame = 0; frame <= vstate->curframe; frame++) { 15914 parent = vparent->frame[frame]; 15915 state = vstate->frame[frame]; 15916 parent_reg = parent->regs; 15917 state_reg = state->regs; 15918 /* We don't need to worry about FP liveness, it's read-only */ 15919 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 15920 err = propagate_liveness_reg(env, &state_reg[i], 15921 &parent_reg[i]); 15922 if (err < 0) 15923 return err; 15924 if (err == REG_LIVE_READ64) 15925 mark_insn_zext(env, &parent_reg[i]); 15926 } 15927 15928 /* Propagate stack slots. */ 15929 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 15930 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 15931 parent_reg = &parent->stack[i].spilled_ptr; 15932 state_reg = &state->stack[i].spilled_ptr; 15933 err = propagate_liveness_reg(env, state_reg, 15934 parent_reg); 15935 if (err < 0) 15936 return err; 15937 } 15938 } 15939 return 0; 15940 } 15941 15942 /* find precise scalars in the previous equivalent state and 15943 * propagate them into the current state 15944 */ 15945 static int propagate_precision(struct bpf_verifier_env *env, 15946 const struct bpf_verifier_state *old) 15947 { 15948 struct bpf_reg_state *state_reg; 15949 struct bpf_func_state *state; 15950 int i, err = 0, fr; 15951 bool first; 15952 15953 for (fr = old->curframe; fr >= 0; fr--) { 15954 state = old->frame[fr]; 15955 state_reg = state->regs; 15956 first = true; 15957 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 15958 if (state_reg->type != SCALAR_VALUE || 15959 !state_reg->precise || 15960 !(state_reg->live & REG_LIVE_READ)) 15961 continue; 15962 if (env->log.level & BPF_LOG_LEVEL2) { 15963 if (first) 15964 verbose(env, "frame %d: propagating r%d", fr, i); 15965 else 15966 verbose(env, ",r%d", i); 15967 } 15968 bt_set_frame_reg(&env->bt, fr, i); 15969 first = false; 15970 } 15971 15972 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 15973 if (!is_spilled_reg(&state->stack[i])) 15974 continue; 15975 state_reg = &state->stack[i].spilled_ptr; 15976 if (state_reg->type != SCALAR_VALUE || 15977 !state_reg->precise || 15978 !(state_reg->live & REG_LIVE_READ)) 15979 continue; 15980 if (env->log.level & BPF_LOG_LEVEL2) { 15981 if (first) 15982 verbose(env, "frame %d: propagating fp%d", 15983 fr, (-i - 1) * BPF_REG_SIZE); 15984 else 15985 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); 15986 } 15987 bt_set_frame_slot(&env->bt, fr, i); 15988 first = false; 15989 } 15990 if (!first) 15991 verbose(env, "\n"); 15992 } 15993 15994 err = mark_chain_precision_batch(env); 15995 if (err < 0) 15996 return err; 15997 15998 return 0; 15999 } 16000 16001 static bool states_maybe_looping(struct bpf_verifier_state *old, 16002 struct bpf_verifier_state *cur) 16003 { 16004 struct bpf_func_state *fold, *fcur; 16005 int i, fr = cur->curframe; 16006 16007 if 
(old->curframe != fr) 16008 return false; 16009 16010 fold = old->frame[fr]; 16011 fcur = cur->frame[fr]; 16012 for (i = 0; i < MAX_BPF_REG; i++) 16013 if (memcmp(&fold->regs[i], &fcur->regs[i], 16014 offsetof(struct bpf_reg_state, parent))) 16015 return false; 16016 return true; 16017 } 16018 16019 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx) 16020 { 16021 return env->insn_aux_data[insn_idx].is_iter_next; 16022 } 16023 16024 /* is_state_visited() handles iter_next() (see process_iter_next_call() for 16025 * terminology) calls specially: as opposed to bounded BPF loops, it *expects* 16026 * states to match, which otherwise would look like an infinite loop. So while 16027 * iter_next() calls are taken care of, we still need to be careful and 16028 * prevent erroneous and too eager declaration of "ininite loop", when 16029 * iterators are involved. 16030 * 16031 * Here's a situation in pseudo-BPF assembly form: 16032 * 16033 * 0: again: ; set up iter_next() call args 16034 * 1: r1 = &it ; <CHECKPOINT HERE> 16035 * 2: call bpf_iter_num_next ; this is iter_next() call 16036 * 3: if r0 == 0 goto done 16037 * 4: ... something useful here ... 16038 * 5: goto again ; another iteration 16039 * 6: done: 16040 * 7: r1 = &it 16041 * 8: call bpf_iter_num_destroy ; clean up iter state 16042 * 9: exit 16043 * 16044 * This is a typical loop. Let's assume that we have a prune point at 1:, 16045 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto 16046 * again`, assuming other heuristics don't get in a way). 16047 * 16048 * When we first time come to 1:, let's say we have some state X. We proceed 16049 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit. 16050 * Now we come back to validate that forked ACTIVE state. We proceed through 16051 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we 16052 * are converging. But the problem is that we don't know that yet, as this 16053 * convergence has to happen at iter_next() call site only. So if nothing is 16054 * done, at 1: verifier will use bounded loop logic and declare infinite 16055 * looping (and would be *technically* correct, if not for iterator's 16056 * "eventual sticky NULL" contract, see process_iter_next_call()). But we 16057 * don't want that. So what we do in process_iter_next_call() when we go on 16058 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's 16059 * a different iteration. So when we suspect an infinite loop, we additionally 16060 * check if any of the *ACTIVE* iterator states depths differ. If yes, we 16061 * pretend we are not looping and wait for next iter_next() call. 16062 * 16063 * This only applies to ACTIVE state. In DRAINED state we don't expect to 16064 * loop, because that would actually mean infinite loop, as DRAINED state is 16065 * "sticky", and so we'll keep returning into the same instruction with the 16066 * same state (at least in one of possible code paths). 16067 * 16068 * This approach allows to keep infinite loop heuristic even in the face of 16069 * active iterator. 
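 *
 * For contrast with the rejected snippet just below, a sketch (same
 * bpf_iter_num API) of a loop that is expected to converge at the
 * iter_next() checkpoint, because the only thing changing between
 * iterations is an imprecise scalar:
 *
 *   struct bpf_iter_num it;
 *   int *p, sum = 0;
 *
 *   bpf_iter_num_new(&it, 0, 10);
 *   while ((p = bpf_iter_num_next(&it)))
 *       sum += *p;
 *   bpf_iter_num_destroy(&it);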
E.g., C snippet below is and will be detected as 16070 * inifintely looping: 16071 * 16072 * struct bpf_iter_num it; 16073 * int *p, x; 16074 * 16075 * bpf_iter_num_new(&it, 0, 10); 16076 * while ((p = bpf_iter_num_next(&t))) { 16077 * x = p; 16078 * while (x--) {} // <<-- infinite loop here 16079 * } 16080 * 16081 */ 16082 static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur) 16083 { 16084 struct bpf_reg_state *slot, *cur_slot; 16085 struct bpf_func_state *state; 16086 int i, fr; 16087 16088 for (fr = old->curframe; fr >= 0; fr--) { 16089 state = old->frame[fr]; 16090 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 16091 if (state->stack[i].slot_type[0] != STACK_ITER) 16092 continue; 16093 16094 slot = &state->stack[i].spilled_ptr; 16095 if (slot->iter.state != BPF_ITER_STATE_ACTIVE) 16096 continue; 16097 16098 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; 16099 if (cur_slot->iter.depth != slot->iter.depth) 16100 return true; 16101 } 16102 } 16103 return false; 16104 } 16105 16106 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 16107 { 16108 struct bpf_verifier_state_list *new_sl; 16109 struct bpf_verifier_state_list *sl, **pprev; 16110 struct bpf_verifier_state *cur = env->cur_state, *new; 16111 int i, j, err, states_cnt = 0; 16112 bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx); 16113 bool add_new_state = force_new_state; 16114 16115 /* bpf progs typically have pruning point every 4 instructions 16116 * http://vger.kernel.org/bpfconf2019.html#session-1 16117 * Do not add new state for future pruning if the verifier hasn't seen 16118 * at least 2 jumps and at least 8 instructions. 16119 * This heuristics helps decrease 'total_states' and 'peak_states' metric. 16120 * In tests that amounts to up to 50% reduction into total verifier 16121 * memory consumption and 20% verifier time speedup. 16122 */ 16123 if (env->jmps_processed - env->prev_jmps_processed >= 2 && 16124 env->insn_processed - env->prev_insn_processed >= 8) 16125 add_new_state = true; 16126 16127 pprev = explored_state(env, insn_idx); 16128 sl = *pprev; 16129 16130 clean_live_states(env, insn_idx, cur); 16131 16132 while (sl) { 16133 states_cnt++; 16134 if (sl->state.insn_idx != insn_idx) 16135 goto next; 16136 16137 if (sl->state.branches) { 16138 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; 16139 16140 if (frame->in_async_callback_fn && 16141 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { 16142 /* Different async_entry_cnt means that the verifier is 16143 * processing another entry into async callback. 16144 * Seeing the same state is not an indication of infinite 16145 * loop or infinite recursion. 16146 * But finding the same state doesn't mean that it's safe 16147 * to stop processing the current state. The previous state 16148 * hasn't yet reached bpf_exit, since state.branches > 0. 16149 * Checking in_async_callback_fn alone is not enough either. 16150 * Since the verifier still needs to catch infinite loops 16151 * inside async callbacks. 16152 */ 16153 goto skip_inf_loop_check; 16154 } 16155 /* BPF open-coded iterators loop detection is special. 16156 * states_maybe_looping() logic is too simplistic in detecting 16157 * states that *might* be equivalent, because it doesn't know 16158 * about ID remapping, so don't even perform it. 16159 * See process_iter_next_call() and iter_active_depths_differ() 16160 * for overview of the logic. 
When current and one of parent 16161 * states are detected as equivalent, it's a good thing: we prove 16162 * convergence and can stop simulating further iterations. 16163 * It's safe to assume that iterator loop will finish, taking into 16164 * account iter_next() contract of eventually returning 16165 * sticky NULL result. 16166 */ 16167 if (is_iter_next_insn(env, insn_idx)) { 16168 if (states_equal(env, &sl->state, cur)) { 16169 struct bpf_func_state *cur_frame; 16170 struct bpf_reg_state *iter_state, *iter_reg; 16171 int spi; 16172 16173 cur_frame = cur->frame[cur->curframe]; 16174 /* btf_check_iter_kfuncs() enforces that 16175 * iter state pointer is always the first arg 16176 */ 16177 iter_reg = &cur_frame->regs[BPF_REG_1]; 16178 /* current state is valid due to states_equal(), 16179 * so we can assume valid iter and reg state, 16180 * no need for extra (re-)validations 16181 */ 16182 spi = __get_spi(iter_reg->off + iter_reg->var_off.value); 16183 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; 16184 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) 16185 goto hit; 16186 } 16187 goto skip_inf_loop_check; 16188 } 16189 /* attempt to detect infinite loop to avoid unnecessary doomed work */ 16190 if (states_maybe_looping(&sl->state, cur) && 16191 states_equal(env, &sl->state, cur) && 16192 !iter_active_depths_differ(&sl->state, cur)) { 16193 verbose_linfo(env, insn_idx, "; "); 16194 verbose(env, "infinite loop detected at insn %d\n", insn_idx); 16195 return -EINVAL; 16196 } 16197 /* if the verifier is processing a loop, avoid adding new state 16198 * too often, since different loop iterations have distinct 16199 * states and may not help future pruning. 16200 * This threshold shouldn't be too low to make sure that 16201 * a loop with large bound will be rejected quickly. 16202 * The most abusive loop will be: 16203 * r1 += 1 16204 * if r1 < 1000000 goto pc-2 16205 * 1M insn_procssed limit / 100 == 10k peak states. 16206 * This threshold shouldn't be too high either, since states 16207 * at the end of the loop are likely to be useful in pruning. 16208 */ 16209 skip_inf_loop_check: 16210 if (!force_new_state && 16211 env->jmps_processed - env->prev_jmps_processed < 20 && 16212 env->insn_processed - env->prev_insn_processed < 100) 16213 add_new_state = false; 16214 goto miss; 16215 } 16216 if (states_equal(env, &sl->state, cur)) { 16217 hit: 16218 sl->hit_cnt++; 16219 /* reached equivalent register/stack state, 16220 * prune the search. 16221 * Registers read by the continuation are read by us. 16222 * If we have any write marks in env->cur_state, they 16223 * will prevent corresponding reads in the continuation 16224 * from reaching our parent (an explored_state). Our 16225 * own state will get the read marks recorded, but 16226 * they'll be immediately forgotten as we're pruning 16227 * this state and will pop a new one. 16228 */ 16229 err = propagate_liveness(env, &sl->state, cur); 16230 16231 /* if previous state reached the exit with precision and 16232 * current state is equivalent to it (except precsion marks) 16233 * the precision needs to be propagated back in 16234 * the current state. 16235 */ 16236 err = err ? : push_jmp_history(env, cur); 16237 err = err ? : propagate_precision(env, &sl->state); 16238 if (err) 16239 return err; 16240 return 1; 16241 } 16242 miss: 16243 /* when new state is not going to be added do not increase miss count. 16244 * Otherwise several loop iterations will remove the state 16245 * recorded earlier. 
The goal of these heuristics is to have 16246 * states from some iterations of the loop (some in the beginning 16247 * and some at the end) to help pruning. 16248 */ 16249 if (add_new_state) 16250 sl->miss_cnt++; 16251 /* heuristic to determine whether this state is beneficial 16252 * to keep checking from state equivalence point of view. 16253 * Higher numbers increase max_states_per_insn and verification time, 16254 * but do not meaningfully decrease insn_processed. 16255 */ 16256 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { 16257 /* the state is unlikely to be useful. Remove it to 16258 * speed up verification 16259 */ 16260 *pprev = sl->next; 16261 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { 16262 u32 br = sl->state.branches; 16263 16264 WARN_ONCE(br, 16265 "BUG live_done but branches_to_explore %d\n", 16266 br); 16267 free_verifier_state(&sl->state, false); 16268 kfree(sl); 16269 env->peak_states--; 16270 } else { 16271 /* cannot free this state, since parentage chain may 16272 * walk it later. Add it for free_list instead to 16273 * be freed at the end of verification 16274 */ 16275 sl->next = env->free_list; 16276 env->free_list = sl; 16277 } 16278 sl = *pprev; 16279 continue; 16280 } 16281 next: 16282 pprev = &sl->next; 16283 sl = *pprev; 16284 } 16285 16286 if (env->max_states_per_insn < states_cnt) 16287 env->max_states_per_insn = states_cnt; 16288 16289 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 16290 return 0; 16291 16292 if (!add_new_state) 16293 return 0; 16294 16295 /* There were no equivalent states, remember the current one. 16296 * Technically the current state is not proven to be safe yet, 16297 * but it will either reach outer most bpf_exit (which means it's safe) 16298 * or it will be rejected. When there are no loops the verifier won't be 16299 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 16300 * again on the way to bpf_exit. 16301 * When looping the sl->state.branches will be > 0 and this state 16302 * will not be considered for equivalence until branches == 0. 16303 */ 16304 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 16305 if (!new_sl) 16306 return -ENOMEM; 16307 env->total_states++; 16308 env->peak_states++; 16309 env->prev_jmps_processed = env->jmps_processed; 16310 env->prev_insn_processed = env->insn_processed; 16311 16312 /* forget precise markings we inherited, see __mark_chain_precision */ 16313 if (env->bpf_capable) 16314 mark_all_scalars_imprecise(env, cur); 16315 16316 /* add new state to the head of linked list */ 16317 new = &new_sl->state; 16318 err = copy_verifier_state(new, cur); 16319 if (err) { 16320 free_verifier_state(new, false); 16321 kfree(new_sl); 16322 return err; 16323 } 16324 new->insn_idx = insn_idx; 16325 WARN_ONCE(new->branches != 1, 16326 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 16327 16328 cur->parent = new; 16329 cur->first_insn_idx = insn_idx; 16330 clear_jmp_history(cur); 16331 new_sl->next = *explored_state(env, insn_idx); 16332 *explored_state(env, insn_idx) = new_sl; 16333 /* connect new state to parentage chain. Current frame needs all 16334 * registers connected. Only r6 - r9 of the callers are alive (pushed 16335 * to the stack implicitly by JITs) so in callers' frames connect just 16336 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 16337 * the state of the call instruction (with WRITTEN set), and r0 comes 16338 * from callee with its full parentage chain, anyway. 
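 *
 * A sketch of the resulting parentage pointers for a two-frame state
 * (frame 0 called into frame 1, indices illustrative):
 *
 *   cur->frame[1]->regs[i].parent -> new->frame[1]->regs[i]   for r0..r9
 *   cur->frame[0]->regs[i].parent -> new->frame[0]->regs[i]   for r6..r9
 *
 * so read marks set on cur's registers later walk these pointers into
 * the stored (explored) state.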
16339 */ 16340 /* clear write marks in current state: the writes we did are not writes 16341 * our child did, so they don't screen off its reads from us. 16342 * (There are no read marks in current state, because reads always mark 16343 * their parent and current state never has children yet. Only 16344 * explored_states can get read marks.) 16345 */ 16346 for (j = 0; j <= cur->curframe; j++) { 16347 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 16348 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 16349 for (i = 0; i < BPF_REG_FP; i++) 16350 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 16351 } 16352 16353 /* all stack frames are accessible from callee, clear them all */ 16354 for (j = 0; j <= cur->curframe; j++) { 16355 struct bpf_func_state *frame = cur->frame[j]; 16356 struct bpf_func_state *newframe = new->frame[j]; 16357 16358 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 16359 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 16360 frame->stack[i].spilled_ptr.parent = 16361 &newframe->stack[i].spilled_ptr; 16362 } 16363 } 16364 return 0; 16365 } 16366 16367 /* Return true if it's OK to have the same insn return a different type. */ 16368 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 16369 { 16370 switch (base_type(type)) { 16371 case PTR_TO_CTX: 16372 case PTR_TO_SOCKET: 16373 case PTR_TO_SOCK_COMMON: 16374 case PTR_TO_TCP_SOCK: 16375 case PTR_TO_XDP_SOCK: 16376 case PTR_TO_BTF_ID: 16377 return false; 16378 default: 16379 return true; 16380 } 16381 } 16382 16383 /* If an instruction was previously used with particular pointer types, then we 16384 * need to be careful to avoid cases such as the below, where it may be ok 16385 * for one branch accessing the pointer, but not ok for the other branch: 16386 * 16387 * R1 = sock_ptr 16388 * goto X; 16389 * ... 16390 * R1 = some_other_valid_ptr; 16391 * goto X; 16392 * ... 16393 * R2 = *(u32 *)(R1 + 0); 16394 */ 16395 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 16396 { 16397 return src != prev && (!reg_type_mismatch_ok(src) || 16398 !reg_type_mismatch_ok(prev)); 16399 } 16400 16401 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, 16402 bool allow_trust_missmatch) 16403 { 16404 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; 16405 16406 if (*prev_type == NOT_INIT) { 16407 /* Saw a valid insn 16408 * dst_reg = *(u32 *)(src_reg + off) 16409 * save type to validate intersecting paths 16410 */ 16411 *prev_type = type; 16412 } else if (reg_type_mismatch(type, *prev_type)) { 16413 /* Abuser program is trying to use the same insn 16414 * dst_reg = *(u32*) (src_reg + off) 16415 * with different pointer types: 16416 * src_reg == ctx in one branch and 16417 * src_reg == stack|map in some other branch. 16418 * Reject it. 16419 */ 16420 if (allow_trust_missmatch && 16421 base_type(type) == PTR_TO_BTF_ID && 16422 base_type(*prev_type) == PTR_TO_BTF_ID) { 16423 /* 16424 * Have to support a use case when one path through 16425 * the program yields TRUSTED pointer while another 16426 * is UNTRUSTED. Fallback to UNTRUSTED to generate 16427 * BPF_PROBE_MEM/BPF_PROBE_MEMSX. 
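 *
 * A sketch of how that can happen (field choice illustrative):
 *
 *   struct task_struct *t, *p;
 *   int x;
 *
 *   t = bpf_get_current_task_btf();   // trusted PTR_TO_BTF_ID
 *   if (cond)
 *           p = t;                    // trusted on this path
 *   else
 *           p = t->last_wakee;        // BTF pointer walk: untrusted
 *   x = p->pid;                       // one load insn, two pointer flavours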
16428 */ 16429 *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; 16430 } else { 16431 verbose(env, "same insn cannot be used with different pointers\n"); 16432 return -EINVAL; 16433 } 16434 } 16435 16436 return 0; 16437 } 16438 16439 static int do_check(struct bpf_verifier_env *env) 16440 { 16441 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 16442 struct bpf_verifier_state *state = env->cur_state; 16443 struct bpf_insn *insns = env->prog->insnsi; 16444 struct bpf_reg_state *regs; 16445 int insn_cnt = env->prog->len; 16446 bool do_print_state = false; 16447 int prev_insn_idx = -1; 16448 16449 for (;;) { 16450 struct bpf_insn *insn; 16451 u8 class; 16452 int err; 16453 16454 env->prev_insn_idx = prev_insn_idx; 16455 if (env->insn_idx >= insn_cnt) { 16456 verbose(env, "invalid insn idx %d insn_cnt %d\n", 16457 env->insn_idx, insn_cnt); 16458 return -EFAULT; 16459 } 16460 16461 insn = &insns[env->insn_idx]; 16462 class = BPF_CLASS(insn->code); 16463 16464 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 16465 verbose(env, 16466 "BPF program is too large. Processed %d insn\n", 16467 env->insn_processed); 16468 return -E2BIG; 16469 } 16470 16471 state->last_insn_idx = env->prev_insn_idx; 16472 16473 if (is_prune_point(env, env->insn_idx)) { 16474 err = is_state_visited(env, env->insn_idx); 16475 if (err < 0) 16476 return err; 16477 if (err == 1) { 16478 /* found equivalent state, can prune the search */ 16479 if (env->log.level & BPF_LOG_LEVEL) { 16480 if (do_print_state) 16481 verbose(env, "\nfrom %d to %d%s: safe\n", 16482 env->prev_insn_idx, env->insn_idx, 16483 env->cur_state->speculative ? 16484 " (speculative execution)" : ""); 16485 else 16486 verbose(env, "%d: safe\n", env->insn_idx); 16487 } 16488 goto process_bpf_exit; 16489 } 16490 } 16491 16492 if (is_jmp_point(env, env->insn_idx)) { 16493 err = push_jmp_history(env, state); 16494 if (err) 16495 return err; 16496 } 16497 16498 if (signal_pending(current)) 16499 return -EAGAIN; 16500 16501 if (need_resched()) 16502 cond_resched(); 16503 16504 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { 16505 verbose(env, "\nfrom %d to %d%s:", 16506 env->prev_insn_idx, env->insn_idx, 16507 env->cur_state->speculative ? 
16508 " (speculative execution)" : ""); 16509 print_verifier_state(env, state->frame[state->curframe], true); 16510 do_print_state = false; 16511 } 16512 16513 if (env->log.level & BPF_LOG_LEVEL) { 16514 const struct bpf_insn_cbs cbs = { 16515 .cb_call = disasm_kfunc_name, 16516 .cb_print = verbose, 16517 .private_data = env, 16518 }; 16519 16520 if (verifier_state_scratched(env)) 16521 print_insn_state(env, state->frame[state->curframe]); 16522 16523 verbose_linfo(env, env->insn_idx, "; "); 16524 env->prev_log_pos = env->log.end_pos; 16525 verbose(env, "%d: ", env->insn_idx); 16526 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 16527 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; 16528 env->prev_log_pos = env->log.end_pos; 16529 } 16530 16531 if (bpf_prog_is_offloaded(env->prog->aux)) { 16532 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 16533 env->prev_insn_idx); 16534 if (err) 16535 return err; 16536 } 16537 16538 regs = cur_regs(env); 16539 sanitize_mark_insn_seen(env); 16540 prev_insn_idx = env->insn_idx; 16541 16542 if (class == BPF_ALU || class == BPF_ALU64) { 16543 err = check_alu_op(env, insn); 16544 if (err) 16545 return err; 16546 16547 } else if (class == BPF_LDX) { 16548 enum bpf_reg_type src_reg_type; 16549 16550 /* check for reserved fields is already done */ 16551 16552 /* check src operand */ 16553 err = check_reg_arg(env, insn->src_reg, SRC_OP); 16554 if (err) 16555 return err; 16556 16557 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 16558 if (err) 16559 return err; 16560 16561 src_reg_type = regs[insn->src_reg].type; 16562 16563 /* check that memory (src_reg + off) is readable, 16564 * the state of dst_reg will be updated by this func 16565 */ 16566 err = check_mem_access(env, env->insn_idx, insn->src_reg, 16567 insn->off, BPF_SIZE(insn->code), 16568 BPF_READ, insn->dst_reg, false, 16569 BPF_MODE(insn->code) == BPF_MEMSX); 16570 if (err) 16571 return err; 16572 16573 err = save_aux_ptr_type(env, src_reg_type, true); 16574 if (err) 16575 return err; 16576 } else if (class == BPF_STX) { 16577 enum bpf_reg_type dst_reg_type; 16578 16579 if (BPF_MODE(insn->code) == BPF_ATOMIC) { 16580 err = check_atomic(env, env->insn_idx, insn); 16581 if (err) 16582 return err; 16583 env->insn_idx++; 16584 continue; 16585 } 16586 16587 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { 16588 verbose(env, "BPF_STX uses reserved fields\n"); 16589 return -EINVAL; 16590 } 16591 16592 /* check src1 operand */ 16593 err = check_reg_arg(env, insn->src_reg, SRC_OP); 16594 if (err) 16595 return err; 16596 /* check src2 operand */ 16597 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 16598 if (err) 16599 return err; 16600 16601 dst_reg_type = regs[insn->dst_reg].type; 16602 16603 /* check that memory (dst_reg + off) is writeable */ 16604 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 16605 insn->off, BPF_SIZE(insn->code), 16606 BPF_WRITE, insn->src_reg, false, false); 16607 if (err) 16608 return err; 16609 16610 err = save_aux_ptr_type(env, dst_reg_type, false); 16611 if (err) 16612 return err; 16613 } else if (class == BPF_ST) { 16614 enum bpf_reg_type dst_reg_type; 16615 16616 if (BPF_MODE(insn->code) != BPF_MEM || 16617 insn->src_reg != BPF_REG_0) { 16618 verbose(env, "BPF_ST uses reserved fields\n"); 16619 return -EINVAL; 16620 } 16621 /* check src operand */ 16622 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 16623 if (err) 16624 return err; 16625 16626 dst_reg_type = regs[insn->dst_reg].type; 16627 16628 /* check that memory 
(dst_reg + off) is writeable */ 16629 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 16630 insn->off, BPF_SIZE(insn->code), 16631 BPF_WRITE, -1, false, false); 16632 if (err) 16633 return err; 16634 16635 err = save_aux_ptr_type(env, dst_reg_type, false); 16636 if (err) 16637 return err; 16638 } else if (class == BPF_JMP || class == BPF_JMP32) { 16639 u8 opcode = BPF_OP(insn->code); 16640 16641 env->jmps_processed++; 16642 if (opcode == BPF_CALL) { 16643 if (BPF_SRC(insn->code) != BPF_K || 16644 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL 16645 && insn->off != 0) || 16646 (insn->src_reg != BPF_REG_0 && 16647 insn->src_reg != BPF_PSEUDO_CALL && 16648 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || 16649 insn->dst_reg != BPF_REG_0 || 16650 class == BPF_JMP32) { 16651 verbose(env, "BPF_CALL uses reserved fields\n"); 16652 return -EINVAL; 16653 } 16654 16655 if (env->cur_state->active_lock.ptr) { 16656 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || 16657 (insn->src_reg == BPF_PSEUDO_CALL) || 16658 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && 16659 (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) { 16660 verbose(env, "function calls are not allowed while holding a lock\n"); 16661 return -EINVAL; 16662 } 16663 } 16664 if (insn->src_reg == BPF_PSEUDO_CALL) 16665 err = check_func_call(env, insn, &env->insn_idx); 16666 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) 16667 err = check_kfunc_call(env, insn, &env->insn_idx); 16668 else 16669 err = check_helper_call(env, insn, &env->insn_idx); 16670 if (err) 16671 return err; 16672 16673 mark_reg_scratched(env, BPF_REG_0); 16674 } else if (opcode == BPF_JA) { 16675 if (BPF_SRC(insn->code) != BPF_K || 16676 insn->src_reg != BPF_REG_0 || 16677 insn->dst_reg != BPF_REG_0 || 16678 (class == BPF_JMP && insn->imm != 0) || 16679 (class == BPF_JMP32 && insn->off != 0)) { 16680 verbose(env, "BPF_JA uses reserved fields\n"); 16681 return -EINVAL; 16682 } 16683 16684 if (class == BPF_JMP) 16685 env->insn_idx += insn->off + 1; 16686 else 16687 env->insn_idx += insn->imm + 1; 16688 continue; 16689 16690 } else if (opcode == BPF_EXIT) { 16691 if (BPF_SRC(insn->code) != BPF_K || 16692 insn->imm != 0 || 16693 insn->src_reg != BPF_REG_0 || 16694 insn->dst_reg != BPF_REG_0 || 16695 class == BPF_JMP32) { 16696 verbose(env, "BPF_EXIT uses reserved fields\n"); 16697 return -EINVAL; 16698 } 16699 16700 if (env->cur_state->active_lock.ptr && 16701 !in_rbtree_lock_required_cb(env)) { 16702 verbose(env, "bpf_spin_unlock is missing\n"); 16703 return -EINVAL; 16704 } 16705 16706 if (env->cur_state->active_rcu_lock && 16707 !in_rbtree_lock_required_cb(env)) { 16708 verbose(env, "bpf_rcu_read_unlock is missing\n"); 16709 return -EINVAL; 16710 } 16711 16712 /* We must do check_reference_leak here before 16713 * prepare_func_exit to handle the case when 16714 * state->curframe > 0, it may be a callback 16715 * function, for which reference_state must 16716 * match caller reference state when it exits. 
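 *
 * E.g. (sketch) a callback that acquires a reference, say via
 * bpf_sk_lookup_tcp(), and returns without releasing it is rejected
 * right here as an unreleased reference, before prepare_func_exit()
 * rolls the frame back into the caller.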
16717 */ 16718 err = check_reference_leak(env); 16719 if (err) 16720 return err; 16721 16722 if (state->curframe) { 16723 /* exit from nested function */ 16724 err = prepare_func_exit(env, &env->insn_idx); 16725 if (err) 16726 return err; 16727 do_print_state = true; 16728 continue; 16729 } 16730 16731 err = check_return_code(env); 16732 if (err) 16733 return err; 16734 process_bpf_exit: 16735 mark_verifier_state_scratched(env); 16736 update_branch_counts(env, env->cur_state); 16737 err = pop_stack(env, &prev_insn_idx, 16738 &env->insn_idx, pop_log); 16739 if (err < 0) { 16740 if (err != -ENOENT) 16741 return err; 16742 break; 16743 } else { 16744 do_print_state = true; 16745 continue; 16746 } 16747 } else { 16748 err = check_cond_jmp_op(env, insn, &env->insn_idx); 16749 if (err) 16750 return err; 16751 } 16752 } else if (class == BPF_LD) { 16753 u8 mode = BPF_MODE(insn->code); 16754 16755 if (mode == BPF_ABS || mode == BPF_IND) { 16756 err = check_ld_abs(env, insn); 16757 if (err) 16758 return err; 16759 16760 } else if (mode == BPF_IMM) { 16761 err = check_ld_imm(env, insn); 16762 if (err) 16763 return err; 16764 16765 env->insn_idx++; 16766 sanitize_mark_insn_seen(env); 16767 } else { 16768 verbose(env, "invalid BPF_LD mode\n"); 16769 return -EINVAL; 16770 } 16771 } else { 16772 verbose(env, "unknown insn class %d\n", class); 16773 return -EINVAL; 16774 } 16775 16776 env->insn_idx++; 16777 } 16778 16779 return 0; 16780 } 16781 16782 static int find_btf_percpu_datasec(struct btf *btf) 16783 { 16784 const struct btf_type *t; 16785 const char *tname; 16786 int i, n; 16787 16788 /* 16789 * Both vmlinux and module each have their own ".data..percpu" 16790 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF 16791 * types to look at only module's own BTF types. 
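 *
 * (Module BTF is "split" BTF: its type IDs continue right after the
 *  last vmlinux type ID, so starting the scan at btf_nr_types(btf_vmlinux)
 *  skips exactly the shared vmlinux part.)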
16792 */ 16793 n = btf_nr_types(btf); 16794 if (btf_is_module(btf)) 16795 i = btf_nr_types(btf_vmlinux); 16796 else 16797 i = 1; 16798 16799 for(; i < n; i++) { 16800 t = btf_type_by_id(btf, i); 16801 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) 16802 continue; 16803 16804 tname = btf_name_by_offset(btf, t->name_off); 16805 if (!strcmp(tname, ".data..percpu")) 16806 return i; 16807 } 16808 16809 return -ENOENT; 16810 } 16811 16812 /* replace pseudo btf_id with kernel symbol address */ 16813 static int check_pseudo_btf_id(struct bpf_verifier_env *env, 16814 struct bpf_insn *insn, 16815 struct bpf_insn_aux_data *aux) 16816 { 16817 const struct btf_var_secinfo *vsi; 16818 const struct btf_type *datasec; 16819 struct btf_mod_pair *btf_mod; 16820 const struct btf_type *t; 16821 const char *sym_name; 16822 bool percpu = false; 16823 u32 type, id = insn->imm; 16824 struct btf *btf; 16825 s32 datasec_id; 16826 u64 addr; 16827 int i, btf_fd, err; 16828 16829 btf_fd = insn[1].imm; 16830 if (btf_fd) { 16831 btf = btf_get_by_fd(btf_fd); 16832 if (IS_ERR(btf)) { 16833 verbose(env, "invalid module BTF object FD specified.\n"); 16834 return -EINVAL; 16835 } 16836 } else { 16837 if (!btf_vmlinux) { 16838 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); 16839 return -EINVAL; 16840 } 16841 btf = btf_vmlinux; 16842 btf_get(btf); 16843 } 16844 16845 t = btf_type_by_id(btf, id); 16846 if (!t) { 16847 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); 16848 err = -ENOENT; 16849 goto err_put; 16850 } 16851 16852 if (!btf_type_is_var(t) && !btf_type_is_func(t)) { 16853 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id); 16854 err = -EINVAL; 16855 goto err_put; 16856 } 16857 16858 sym_name = btf_name_by_offset(btf, t->name_off); 16859 addr = kallsyms_lookup_name(sym_name); 16860 if (!addr) { 16861 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", 16862 sym_name); 16863 err = -ENOENT; 16864 goto err_put; 16865 } 16866 insn[0].imm = (u32)addr; 16867 insn[1].imm = addr >> 32; 16868 16869 if (btf_type_is_func(t)) { 16870 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 16871 aux->btf_var.mem_size = 0; 16872 goto check_btf; 16873 } 16874 16875 datasec_id = find_btf_percpu_datasec(btf); 16876 if (datasec_id > 0) { 16877 datasec = btf_type_by_id(btf, datasec_id); 16878 for_each_vsi(i, datasec, vsi) { 16879 if (vsi->type == id) { 16880 percpu = true; 16881 break; 16882 } 16883 } 16884 } 16885 16886 type = t->type; 16887 t = btf_type_skip_modifiers(btf, type, NULL); 16888 if (percpu) { 16889 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; 16890 aux->btf_var.btf = btf; 16891 aux->btf_var.btf_id = type; 16892 } else if (!btf_type_is_struct(t)) { 16893 const struct btf_type *ret; 16894 const char *tname; 16895 u32 tsize; 16896 16897 /* resolve the type size of ksym. 
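 * E.g. a ksym declared in BPF C as 'extern const int foo __ksym;'
 * (name illustrative) resolves below to a 4-byte read-only region,
 * i.e. PTR_TO_MEM | MEM_RDONLY with mem_size == 4.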
*/ 16898 ret = btf_resolve_size(btf, t, &tsize); 16899 if (IS_ERR(ret)) { 16900 tname = btf_name_by_offset(btf, t->name_off); 16901 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", 16902 tname, PTR_ERR(ret)); 16903 err = -EINVAL; 16904 goto err_put; 16905 } 16906 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 16907 aux->btf_var.mem_size = tsize; 16908 } else { 16909 aux->btf_var.reg_type = PTR_TO_BTF_ID; 16910 aux->btf_var.btf = btf; 16911 aux->btf_var.btf_id = type; 16912 } 16913 check_btf: 16914 /* check whether we recorded this BTF (and maybe module) already */ 16915 for (i = 0; i < env->used_btf_cnt; i++) { 16916 if (env->used_btfs[i].btf == btf) { 16917 btf_put(btf); 16918 return 0; 16919 } 16920 } 16921 16922 if (env->used_btf_cnt >= MAX_USED_BTFS) { 16923 err = -E2BIG; 16924 goto err_put; 16925 } 16926 16927 btf_mod = &env->used_btfs[env->used_btf_cnt]; 16928 btf_mod->btf = btf; 16929 btf_mod->module = NULL; 16930 16931 /* if we reference variables from kernel module, bump its refcount */ 16932 if (btf_is_module(btf)) { 16933 btf_mod->module = btf_try_get_module(btf); 16934 if (!btf_mod->module) { 16935 err = -ENXIO; 16936 goto err_put; 16937 } 16938 } 16939 16940 env->used_btf_cnt++; 16941 16942 return 0; 16943 err_put: 16944 btf_put(btf); 16945 return err; 16946 } 16947 16948 static bool is_tracing_prog_type(enum bpf_prog_type type) 16949 { 16950 switch (type) { 16951 case BPF_PROG_TYPE_KPROBE: 16952 case BPF_PROG_TYPE_TRACEPOINT: 16953 case BPF_PROG_TYPE_PERF_EVENT: 16954 case BPF_PROG_TYPE_RAW_TRACEPOINT: 16955 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 16956 return true; 16957 default: 16958 return false; 16959 } 16960 } 16961 16962 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 16963 struct bpf_map *map, 16964 struct bpf_prog *prog) 16965 16966 { 16967 enum bpf_prog_type prog_type = resolve_prog_type(prog); 16968 16969 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || 16970 btf_record_has_field(map->record, BPF_RB_ROOT)) { 16971 if (is_tracing_prog_type(prog_type)) { 16972 verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n"); 16973 return -EINVAL; 16974 } 16975 } 16976 16977 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 16978 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 16979 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 16980 return -EINVAL; 16981 } 16982 16983 if (is_tracing_prog_type(prog_type)) { 16984 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 16985 return -EINVAL; 16986 } 16987 } 16988 16989 if (btf_record_has_field(map->record, BPF_TIMER)) { 16990 if (is_tracing_prog_type(prog_type)) { 16991 verbose(env, "tracing progs cannot use bpf_timer yet\n"); 16992 return -EINVAL; 16993 } 16994 } 16995 16996 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && 16997 !bpf_offload_prog_map_match(prog, map)) { 16998 verbose(env, "offload device mismatch between prog and map\n"); 16999 return -EINVAL; 17000 } 17001 17002 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 17003 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 17004 return -EINVAL; 17005 } 17006 17007 if (prog->aux->sleepable) 17008 switch (map->map_type) { 17009 case BPF_MAP_TYPE_HASH: 17010 case BPF_MAP_TYPE_LRU_HASH: 17011 case BPF_MAP_TYPE_ARRAY: 17012 case BPF_MAP_TYPE_PERCPU_HASH: 17013 case BPF_MAP_TYPE_PERCPU_ARRAY: 17014 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 17015 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 17016 case BPF_MAP_TYPE_HASH_OF_MAPS: 17017 case 
BPF_MAP_TYPE_RINGBUF: 17018 case BPF_MAP_TYPE_USER_RINGBUF: 17019 case BPF_MAP_TYPE_INODE_STORAGE: 17020 case BPF_MAP_TYPE_SK_STORAGE: 17021 case BPF_MAP_TYPE_TASK_STORAGE: 17022 case BPF_MAP_TYPE_CGRP_STORAGE: 17023 break; 17024 default: 17025 verbose(env, 17026 "Sleepable programs can only use array, hash, ringbuf and local storage maps\n"); 17027 return -EINVAL; 17028 } 17029 17030 return 0; 17031 } 17032 17033 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 17034 { 17035 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 17036 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 17037 } 17038 17039 /* find and rewrite pseudo imm in ld_imm64 instructions: 17040 * 17041 * 1. if it accesses map FD, replace it with actual map pointer. 17042 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 17043 * 17044 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 17045 */ 17046 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 17047 { 17048 struct bpf_insn *insn = env->prog->insnsi; 17049 int insn_cnt = env->prog->len; 17050 int i, j, err; 17051 17052 err = bpf_prog_calc_tag(env->prog); 17053 if (err) 17054 return err; 17055 17056 for (i = 0; i < insn_cnt; i++, insn++) { 17057 if (BPF_CLASS(insn->code) == BPF_LDX && 17058 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || 17059 insn->imm != 0)) { 17060 verbose(env, "BPF_LDX uses reserved fields\n"); 17061 return -EINVAL; 17062 } 17063 17064 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 17065 struct bpf_insn_aux_data *aux; 17066 struct bpf_map *map; 17067 struct fd f; 17068 u64 addr; 17069 u32 fd; 17070 17071 if (i == insn_cnt - 1 || insn[1].code != 0 || 17072 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 17073 insn[1].off != 0) { 17074 verbose(env, "invalid bpf_ld_imm64 insn\n"); 17075 return -EINVAL; 17076 } 17077 17078 if (insn[0].src_reg == 0) 17079 /* valid generic load 64-bit imm */ 17080 goto next_insn; 17081 17082 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 17083 aux = &env->insn_aux_data[i]; 17084 err = check_pseudo_btf_id(env, insn, aux); 17085 if (err) 17086 return err; 17087 goto next_insn; 17088 } 17089 17090 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { 17091 aux = &env->insn_aux_data[i]; 17092 aux->ptr_type = PTR_TO_FUNC; 17093 goto next_insn; 17094 } 17095 17096 /* In final convert_pseudo_ld_imm64() step, this is 17097 * converted into regular 64-bit imm load insn. 
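 * Sketch of the end result for the map case handled below:
 * insn[0].imm holds the lower 32 bits and insn[1].imm the upper
 * 32 bits of the resolved address, and src_reg is cleared later
 * so the pair looks like a plain ld_imm64 to the JIT/interpreter.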
17098 */ 17099 switch (insn[0].src_reg) { 17100 case BPF_PSEUDO_MAP_VALUE: 17101 case BPF_PSEUDO_MAP_IDX_VALUE: 17102 break; 17103 case BPF_PSEUDO_MAP_FD: 17104 case BPF_PSEUDO_MAP_IDX: 17105 if (insn[1].imm == 0) 17106 break; 17107 fallthrough; 17108 default: 17109 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); 17110 return -EINVAL; 17111 } 17112 17113 switch (insn[0].src_reg) { 17114 case BPF_PSEUDO_MAP_IDX_VALUE: 17115 case BPF_PSEUDO_MAP_IDX: 17116 if (bpfptr_is_null(env->fd_array)) { 17117 verbose(env, "fd_idx without fd_array is invalid\n"); 17118 return -EPROTO; 17119 } 17120 if (copy_from_bpfptr_offset(&fd, env->fd_array, 17121 insn[0].imm * sizeof(fd), 17122 sizeof(fd))) 17123 return -EFAULT; 17124 break; 17125 default: 17126 fd = insn[0].imm; 17127 break; 17128 } 17129 17130 f = fdget(fd); 17131 map = __bpf_map_get(f); 17132 if (IS_ERR(map)) { 17133 verbose(env, "fd %d is not pointing to valid bpf_map\n", 17134 insn[0].imm); 17135 return PTR_ERR(map); 17136 } 17137 17138 err = check_map_prog_compatibility(env, map, env->prog); 17139 if (err) { 17140 fdput(f); 17141 return err; 17142 } 17143 17144 aux = &env->insn_aux_data[i]; 17145 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || 17146 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { 17147 addr = (unsigned long)map; 17148 } else { 17149 u32 off = insn[1].imm; 17150 17151 if (off >= BPF_MAX_VAR_OFF) { 17152 verbose(env, "direct value offset of %u is not allowed\n", off); 17153 fdput(f); 17154 return -EINVAL; 17155 } 17156 17157 if (!map->ops->map_direct_value_addr) { 17158 verbose(env, "no direct value access support for this map type\n"); 17159 fdput(f); 17160 return -EINVAL; 17161 } 17162 17163 err = map->ops->map_direct_value_addr(map, &addr, off); 17164 if (err) { 17165 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 17166 map->value_size, off); 17167 fdput(f); 17168 return err; 17169 } 17170 17171 aux->map_off = off; 17172 addr += off; 17173 } 17174 17175 insn[0].imm = (u32)addr; 17176 insn[1].imm = addr >> 32; 17177 17178 /* check whether we recorded this map already */ 17179 for (j = 0; j < env->used_map_cnt; j++) { 17180 if (env->used_maps[j] == map) { 17181 aux->map_index = j; 17182 fdput(f); 17183 goto next_insn; 17184 } 17185 } 17186 17187 if (env->used_map_cnt >= MAX_USED_MAPS) { 17188 fdput(f); 17189 return -E2BIG; 17190 } 17191 17192 /* hold the map. If the program is rejected by verifier, 17193 * the map will be released by release_maps() or it 17194 * will be used by the valid program until it's unloaded 17195 * and all maps are released in free_used_maps() 17196 */ 17197 bpf_map_inc(map); 17198 17199 aux->map_index = env->used_map_cnt; 17200 env->used_maps[env->used_map_cnt++] = map; 17201 17202 if (bpf_map_is_cgroup_storage(map) && 17203 bpf_cgroup_storage_assign(env->prog->aux, map)) { 17204 verbose(env, "only one cgroup storage of each type is allowed\n"); 17205 fdput(f); 17206 return -EBUSY; 17207 } 17208 17209 fdput(f); 17210 next_insn: 17211 insn++; 17212 i++; 17213 continue; 17214 } 17215 17216 /* Basic sanity check before we invest more work here. */ 17217 if (!bpf_opcode_in_insntable(insn->code)) { 17218 verbose(env, "unknown opcode %02x\n", insn->code); 17219 return -EINVAL; 17220 } 17221 } 17222 17223 /* now all pseudo BPF_LD_IMM64 instructions load valid 17224 * 'struct bpf_map *' into a register instead of user map_fd. 17225 * These pointers will be used later by verifier to validate map access. 
17226 */ 17227 return 0; 17228 } 17229 17230 /* drop refcnt of maps used by the rejected program */ 17231 static void release_maps(struct bpf_verifier_env *env) 17232 { 17233 __bpf_free_used_maps(env->prog->aux, env->used_maps, 17234 env->used_map_cnt); 17235 } 17236 17237 /* drop refcnt of maps used by the rejected program */ 17238 static void release_btfs(struct bpf_verifier_env *env) 17239 { 17240 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, 17241 env->used_btf_cnt); 17242 } 17243 17244 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 17245 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 17246 { 17247 struct bpf_insn *insn = env->prog->insnsi; 17248 int insn_cnt = env->prog->len; 17249 int i; 17250 17251 for (i = 0; i < insn_cnt; i++, insn++) { 17252 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) 17253 continue; 17254 if (insn->src_reg == BPF_PSEUDO_FUNC) 17255 continue; 17256 insn->src_reg = 0; 17257 } 17258 } 17259 17260 /* single env->prog->insni[off] instruction was replaced with the range 17261 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 17262 * [0, off) and [off, end) to new locations, so the patched range stays zero 17263 */ 17264 static void adjust_insn_aux_data(struct bpf_verifier_env *env, 17265 struct bpf_insn_aux_data *new_data, 17266 struct bpf_prog *new_prog, u32 off, u32 cnt) 17267 { 17268 struct bpf_insn_aux_data *old_data = env->insn_aux_data; 17269 struct bpf_insn *insn = new_prog->insnsi; 17270 u32 old_seen = old_data[off].seen; 17271 u32 prog_len; 17272 int i; 17273 17274 /* aux info at OFF always needs adjustment, no matter fast path 17275 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 17276 * original insn at old prog. 17277 */ 17278 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 17279 17280 if (cnt == 1) 17281 return; 17282 prog_len = new_prog->len; 17283 17284 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 17285 memcpy(new_data + off + cnt - 1, old_data + off, 17286 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 17287 for (i = off; i < off + cnt - 1; i++) { 17288 /* Expand insni[off]'s seen count to the patched range. */ 17289 new_data[i].seen = old_seen; 17290 new_data[i].zext_dst = insn_has_def32(env, insn + i); 17291 } 17292 env->insn_aux_data = new_data; 17293 vfree(old_data); 17294 } 17295 17296 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 17297 { 17298 int i; 17299 17300 if (len == 1) 17301 return; 17302 /* NOTE: fake 'exit' subprog should be updated as well. 
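 * E.g. patching the single insn at off == 10 into a 3-insn sequence
 * (len == 3) moves the start of every subprog beginning after insn 10,
 * including the fake 'exit' one, forward by len - 1 == 2.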
*/ 17303 for (i = 0; i <= env->subprog_cnt; i++) { 17304 if (env->subprog_info[i].start <= off) 17305 continue; 17306 env->subprog_info[i].start += len - 1; 17307 } 17308 } 17309 17310 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 17311 { 17312 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 17313 int i, sz = prog->aux->size_poke_tab; 17314 struct bpf_jit_poke_descriptor *desc; 17315 17316 for (i = 0; i < sz; i++) { 17317 desc = &tab[i]; 17318 if (desc->insn_idx <= off) 17319 continue; 17320 desc->insn_idx += len - 1; 17321 } 17322 } 17323 17324 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 17325 const struct bpf_insn *patch, u32 len) 17326 { 17327 struct bpf_prog *new_prog; 17328 struct bpf_insn_aux_data *new_data = NULL; 17329 17330 if (len > 1) { 17331 new_data = vzalloc(array_size(env->prog->len + len - 1, 17332 sizeof(struct bpf_insn_aux_data))); 17333 if (!new_data) 17334 return NULL; 17335 } 17336 17337 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 17338 if (IS_ERR(new_prog)) { 17339 if (PTR_ERR(new_prog) == -ERANGE) 17340 verbose(env, 17341 "insn %d cannot be patched due to 16-bit range\n", 17342 env->insn_aux_data[off].orig_idx); 17343 vfree(new_data); 17344 return NULL; 17345 } 17346 adjust_insn_aux_data(env, new_data, new_prog, off, len); 17347 adjust_subprog_starts(env, off, len); 17348 adjust_poke_descs(new_prog, off, len); 17349 return new_prog; 17350 } 17351 17352 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 17353 u32 off, u32 cnt) 17354 { 17355 int i, j; 17356 17357 /* find first prog starting at or after off (first to remove) */ 17358 for (i = 0; i < env->subprog_cnt; i++) 17359 if (env->subprog_info[i].start >= off) 17360 break; 17361 /* find first prog starting at or after off + cnt (first to stay) */ 17362 for (j = i; j < env->subprog_cnt; j++) 17363 if (env->subprog_info[j].start >= off + cnt) 17364 break; 17365 /* if j doesn't start exactly at off + cnt, we are just removing 17366 * the front of previous prog 17367 */ 17368 if (env->subprog_info[j].start != off + cnt) 17369 j--; 17370 17371 if (j > i) { 17372 struct bpf_prog_aux *aux = env->prog->aux; 17373 int move; 17374 17375 /* move fake 'exit' subprog as well */ 17376 move = env->subprog_cnt + 1 - j; 17377 17378 memmove(env->subprog_info + i, 17379 env->subprog_info + j, 17380 sizeof(*env->subprog_info) * move); 17381 env->subprog_cnt -= j - i; 17382 17383 /* remove func_info */ 17384 if (aux->func_info) { 17385 move = aux->func_info_cnt - j; 17386 17387 memmove(aux->func_info + i, 17388 aux->func_info + j, 17389 sizeof(*aux->func_info) * move); 17390 aux->func_info_cnt -= j - i; 17391 /* func_info->insn_off is set after all code rewrites, 17392 * in adjust_btf_func() - no need to adjust 17393 */ 17394 } 17395 } else { 17396 /* convert i from "first prog to remove" to "first to adjust" */ 17397 if (env->subprog_info[i].start == off) 17398 i++; 17399 } 17400 17401 /* update fake 'exit' subprog as well */ 17402 for (; i <= env->subprog_cnt; i++) 17403 env->subprog_info[i].start -= cnt; 17404 17405 return 0; 17406 } 17407 17408 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 17409 u32 cnt) 17410 { 17411 struct bpf_prog *prog = env->prog; 17412 u32 i, l_off, l_cnt, nr_linfo; 17413 struct bpf_line_info *linfo; 17414 17415 nr_linfo = prog->aux->nr_linfo; 17416 if (!nr_linfo) 17417 return 0; 17418 17419 linfo = prog->aux->linfo; 17420 17421 /* find first line info to 
remove, count lines to be removed */ 17422 for (i = 0; i < nr_linfo; i++) 17423 if (linfo[i].insn_off >= off) 17424 break; 17425 17426 l_off = i; 17427 l_cnt = 0; 17428 for (; i < nr_linfo; i++) 17429 if (linfo[i].insn_off < off + cnt) 17430 l_cnt++; 17431 else 17432 break; 17433 17434 /* First live insn doesn't match first live linfo, it needs to "inherit" 17435 * last removed linfo. prog is already modified, so prog->len == off 17436 * means no live instructions after (tail of the program was removed). 17437 */ 17438 if (prog->len != off && l_cnt && 17439 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 17440 l_cnt--; 17441 linfo[--i].insn_off = off + cnt; 17442 } 17443 17444 /* remove the line info which refer to the removed instructions */ 17445 if (l_cnt) { 17446 memmove(linfo + l_off, linfo + i, 17447 sizeof(*linfo) * (nr_linfo - i)); 17448 17449 prog->aux->nr_linfo -= l_cnt; 17450 nr_linfo = prog->aux->nr_linfo; 17451 } 17452 17453 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 17454 for (i = l_off; i < nr_linfo; i++) 17455 linfo[i].insn_off -= cnt; 17456 17457 /* fix up all subprogs (incl. 'exit') which start >= off */ 17458 for (i = 0; i <= env->subprog_cnt; i++) 17459 if (env->subprog_info[i].linfo_idx > l_off) { 17460 /* program may have started in the removed region but 17461 * may not be fully removed 17462 */ 17463 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 17464 env->subprog_info[i].linfo_idx -= l_cnt; 17465 else 17466 env->subprog_info[i].linfo_idx = l_off; 17467 } 17468 17469 return 0; 17470 } 17471 17472 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 17473 { 17474 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 17475 unsigned int orig_prog_len = env->prog->len; 17476 int err; 17477 17478 if (bpf_prog_is_offloaded(env->prog->aux)) 17479 bpf_prog_offload_remove_insns(env, off, cnt); 17480 17481 err = bpf_remove_insns(env->prog, off, cnt); 17482 if (err) 17483 return err; 17484 17485 err = adjust_subprog_starts_after_remove(env, off, cnt); 17486 if (err) 17487 return err; 17488 17489 err = bpf_adj_linfo_after_remove(env, off, cnt); 17490 if (err) 17491 return err; 17492 17493 memmove(aux_data + off, aux_data + off + cnt, 17494 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 17495 17496 return 0; 17497 } 17498 17499 /* The verifier does more data flow analysis than llvm and will not 17500 * explore branches that are dead at run time. Malicious programs can 17501 * have dead code too. Therefore replace all dead at-run-time code 17502 * with 'ja -1'. 17503 * 17504 * Just nops are not optimal, e.g. if they would sit at the end of the 17505 * program and through another bug we would manage to jump there, then 17506 * we'd execute beyond program memory otherwise. Returning exception 17507 * code also wouldn't work since we can have subprogs where the dead 17508 * code could be located. 
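 *
 * ('ja -1' jumps to itself: if such a trap insn were ever reached due to
 *  some other bug, execution would spin in place rather than run past
 *  the end of the program.)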
17509 */ 17510 static void sanitize_dead_code(struct bpf_verifier_env *env) 17511 { 17512 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 17513 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 17514 struct bpf_insn *insn = env->prog->insnsi; 17515 const int insn_cnt = env->prog->len; 17516 int i; 17517 17518 for (i = 0; i < insn_cnt; i++) { 17519 if (aux_data[i].seen) 17520 continue; 17521 memcpy(insn + i, &trap, sizeof(trap)); 17522 aux_data[i].zext_dst = false; 17523 } 17524 } 17525 17526 static bool insn_is_cond_jump(u8 code) 17527 { 17528 u8 op; 17529 17530 op = BPF_OP(code); 17531 if (BPF_CLASS(code) == BPF_JMP32) 17532 return op != BPF_JA; 17533 17534 if (BPF_CLASS(code) != BPF_JMP) 17535 return false; 17536 17537 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 17538 } 17539 17540 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 17541 { 17542 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 17543 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 17544 struct bpf_insn *insn = env->prog->insnsi; 17545 const int insn_cnt = env->prog->len; 17546 int i; 17547 17548 for (i = 0; i < insn_cnt; i++, insn++) { 17549 if (!insn_is_cond_jump(insn->code)) 17550 continue; 17551 17552 if (!aux_data[i + 1].seen) 17553 ja.off = insn->off; 17554 else if (!aux_data[i + 1 + insn->off].seen) 17555 ja.off = 0; 17556 else 17557 continue; 17558 17559 if (bpf_prog_is_offloaded(env->prog->aux)) 17560 bpf_prog_offload_replace_insn(env, i, &ja); 17561 17562 memcpy(insn, &ja, sizeof(ja)); 17563 } 17564 } 17565 17566 static int opt_remove_dead_code(struct bpf_verifier_env *env) 17567 { 17568 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 17569 int insn_cnt = env->prog->len; 17570 int i, err; 17571 17572 for (i = 0; i < insn_cnt; i++) { 17573 int j; 17574 17575 j = 0; 17576 while (i + j < insn_cnt && !aux_data[i + j].seen) 17577 j++; 17578 if (!j) 17579 continue; 17580 17581 err = verifier_remove_insns(env, i, j); 17582 if (err) 17583 return err; 17584 insn_cnt = env->prog->len; 17585 } 17586 17587 return 0; 17588 } 17589 17590 static int opt_remove_nops(struct bpf_verifier_env *env) 17591 { 17592 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 17593 struct bpf_insn *insn = env->prog->insnsi; 17594 int insn_cnt = env->prog->len; 17595 int i, err; 17596 17597 for (i = 0; i < insn_cnt; i++) { 17598 if (memcmp(&insn[i], &ja, sizeof(ja))) 17599 continue; 17600 17601 err = verifier_remove_insns(env, i, 1); 17602 if (err) 17603 return err; 17604 insn_cnt--; 17605 i--; 17606 } 17607 17608 return 0; 17609 } 17610 17611 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 17612 const union bpf_attr *attr) 17613 { 17614 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 17615 struct bpf_insn_aux_data *aux = env->insn_aux_data; 17616 int i, patch_len, delta = 0, len = env->prog->len; 17617 struct bpf_insn *insns = env->prog->insnsi; 17618 struct bpf_prog *new_prog; 17619 bool rnd_hi32; 17620 17621 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 17622 zext_patch[1] = BPF_ZEXT_REG(0); 17623 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 17624 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 17625 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 17626 for (i = 0; i < len; i++) { 17627 int adj_idx = i + delta; 17628 struct bpf_insn insn; 17629 int load_reg; 17630 17631 insn = insns[adj_idx]; 17632 load_reg = insn_def_regno(&insn); 17633 if (!aux[adj_idx].zext_dst) { 17634 u8 code, class; 17635 
u32 imm_rnd; 17636 17637 if (!rnd_hi32) 17638 continue; 17639 17640 code = insn.code; 17641 class = BPF_CLASS(code); 17642 if (load_reg == -1) 17643 continue; 17644 17645 /* NOTE: arg "reg" (the fourth one) is only used for 17646 * BPF_STX + SRC_OP, so it is safe to pass NULL 17647 * here. 17648 */ 17649 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { 17650 if (class == BPF_LD && 17651 BPF_MODE(code) == BPF_IMM) 17652 i++; 17653 continue; 17654 } 17655 17656 /* ctx load could be transformed into wider load. */ 17657 if (class == BPF_LDX && 17658 aux[adj_idx].ptr_type == PTR_TO_CTX) 17659 continue; 17660 17661 imm_rnd = get_random_u32(); 17662 rnd_hi32_patch[0] = insn; 17663 rnd_hi32_patch[1].imm = imm_rnd; 17664 rnd_hi32_patch[3].dst_reg = load_reg; 17665 patch = rnd_hi32_patch; 17666 patch_len = 4; 17667 goto apply_patch_buffer; 17668 } 17669 17670 /* Add in an zero-extend instruction if a) the JIT has requested 17671 * it or b) it's a CMPXCHG. 17672 * 17673 * The latter is because: BPF_CMPXCHG always loads a value into 17674 * R0, therefore always zero-extends. However some archs' 17675 * equivalent instruction only does this load when the 17676 * comparison is successful. This detail of CMPXCHG is 17677 * orthogonal to the general zero-extension behaviour of the 17678 * CPU, so it's treated independently of bpf_jit_needs_zext. 17679 */ 17680 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) 17681 continue; 17682 17683 /* Zero-extension is done by the caller. */ 17684 if (bpf_pseudo_kfunc_call(&insn)) 17685 continue; 17686 17687 if (WARN_ON(load_reg == -1)) { 17688 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); 17689 return -EFAULT; 17690 } 17691 17692 zext_patch[0] = insn; 17693 zext_patch[1].dst_reg = load_reg; 17694 zext_patch[1].src_reg = load_reg; 17695 patch = zext_patch; 17696 patch_len = 2; 17697 apply_patch_buffer: 17698 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 17699 if (!new_prog) 17700 return -ENOMEM; 17701 env->prog = new_prog; 17702 insns = new_prog->insnsi; 17703 aux = env->insn_aux_data; 17704 delta += patch_len - 1; 17705 } 17706 17707 return 0; 17708 } 17709 17710 /* convert load instructions that access fields of a context type into a 17711 * sequence of instructions that access fields of the underlying structure: 17712 * struct __sk_buff -> struct sk_buff 17713 * struct bpf_sock_ops -> struct sock 17714 */ 17715 static int convert_ctx_accesses(struct bpf_verifier_env *env) 17716 { 17717 const struct bpf_verifier_ops *ops = env->ops; 17718 int i, cnt, size, ctx_field_size, delta = 0; 17719 const int insn_cnt = env->prog->len; 17720 struct bpf_insn insn_buf[16], *insn; 17721 u32 target_size, size_default, off; 17722 struct bpf_prog *new_prog; 17723 enum bpf_access_type type; 17724 bool is_narrower_load; 17725 17726 if (ops->gen_prologue || env->seen_direct_write) { 17727 if (!ops->gen_prologue) { 17728 verbose(env, "bpf verifier is misconfigured\n"); 17729 return -EINVAL; 17730 } 17731 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 17732 env->prog); 17733 if (cnt >= ARRAY_SIZE(insn_buf)) { 17734 verbose(env, "bpf verifier is misconfigured\n"); 17735 return -EINVAL; 17736 } else if (cnt) { 17737 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 17738 if (!new_prog) 17739 return -ENOMEM; 17740 17741 env->prog = new_prog; 17742 delta += cnt - 1; 17743 } 17744 } 17745 17746 if (bpf_prog_is_offloaded(env->prog->aux)) 17747 return 0; 17748 17749 insn = env->prog->insnsi + delta; 17750 17751 for (i = 0; i < 
insn_cnt; i++, insn++) { 17752 bpf_convert_ctx_access_t convert_ctx_access; 17753 u8 mode; 17754 17755 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 17756 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 17757 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 17758 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || 17759 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || 17760 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || 17761 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { 17762 type = BPF_READ; 17763 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 17764 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 17765 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 17766 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || 17767 insn->code == (BPF_ST | BPF_MEM | BPF_B) || 17768 insn->code == (BPF_ST | BPF_MEM | BPF_H) || 17769 insn->code == (BPF_ST | BPF_MEM | BPF_W) || 17770 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { 17771 type = BPF_WRITE; 17772 } else { 17773 continue; 17774 } 17775 17776 if (type == BPF_WRITE && 17777 env->insn_aux_data[i + delta].sanitize_stack_spill) { 17778 struct bpf_insn patch[] = { 17779 *insn, 17780 BPF_ST_NOSPEC(), 17781 }; 17782 17783 cnt = ARRAY_SIZE(patch); 17784 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 17785 if (!new_prog) 17786 return -ENOMEM; 17787 17788 delta += cnt - 1; 17789 env->prog = new_prog; 17790 insn = new_prog->insnsi + i + delta; 17791 continue; 17792 } 17793 17794 switch ((int)env->insn_aux_data[i + delta].ptr_type) { 17795 case PTR_TO_CTX: 17796 if (!ops->convert_ctx_access) 17797 continue; 17798 convert_ctx_access = ops->convert_ctx_access; 17799 break; 17800 case PTR_TO_SOCKET: 17801 case PTR_TO_SOCK_COMMON: 17802 convert_ctx_access = bpf_sock_convert_ctx_access; 17803 break; 17804 case PTR_TO_TCP_SOCK: 17805 convert_ctx_access = bpf_tcp_sock_convert_ctx_access; 17806 break; 17807 case PTR_TO_XDP_SOCK: 17808 convert_ctx_access = bpf_xdp_sock_convert_ctx_access; 17809 break; 17810 case PTR_TO_BTF_ID: 17811 case PTR_TO_BTF_ID | PTR_UNTRUSTED: 17812 /* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike 17813 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot 17814 * be said once it is marked PTR_UNTRUSTED, hence we must handle 17815 * any faults for loads into such types. BPF_WRITE is disallowed 17816 * for this case. 17817 */ 17818 case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED: 17819 if (type == BPF_READ) { 17820 if (BPF_MODE(insn->code) == BPF_MEM) 17821 insn->code = BPF_LDX | BPF_PROBE_MEM | 17822 BPF_SIZE((insn)->code); 17823 else 17824 insn->code = BPF_LDX | BPF_PROBE_MEMSX | 17825 BPF_SIZE((insn)->code); 17826 env->prog->aux->num_exentries++; 17827 } 17828 continue; 17829 default: 17830 continue; 17831 } 17832 17833 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 17834 size = BPF_LDST_BYTES(insn); 17835 mode = BPF_MODE(insn->code); 17836 17837 /* If the read access is a narrower load of the field, 17838 * convert to a 4/8-byte load, to minimum program type specific 17839 * convert_ctx_access changes. If conversion is successful, 17840 * we will apply proper mask to the result. 
17841 */ 17842 is_narrower_load = size < ctx_field_size; 17843 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 17844 off = insn->off; 17845 if (is_narrower_load) { 17846 u8 size_code; 17847 17848 if (type == BPF_WRITE) { 17849 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 17850 return -EINVAL; 17851 } 17852 17853 size_code = BPF_H; 17854 if (ctx_field_size == 4) 17855 size_code = BPF_W; 17856 else if (ctx_field_size == 8) 17857 size_code = BPF_DW; 17858 17859 insn->off = off & ~(size_default - 1); 17860 insn->code = BPF_LDX | BPF_MEM | size_code; 17861 } 17862 17863 target_size = 0; 17864 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 17865 &target_size); 17866 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 17867 (ctx_field_size && !target_size)) { 17868 verbose(env, "bpf verifier is misconfigured\n"); 17869 return -EINVAL; 17870 } 17871 17872 if (is_narrower_load && size < target_size) { 17873 u8 shift = bpf_ctx_narrow_access_offset( 17874 off, size, size_default) * 8; 17875 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { 17876 verbose(env, "bpf verifier narrow ctx load misconfigured\n"); 17877 return -EINVAL; 17878 } 17879 if (ctx_field_size <= 4) { 17880 if (shift) 17881 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 17882 insn->dst_reg, 17883 shift); 17884 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 17885 (1 << size * 8) - 1); 17886 } else { 17887 if (shift) 17888 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 17889 insn->dst_reg, 17890 shift); 17891 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 17892 (1ULL << size * 8) - 1); 17893 } 17894 } 17895 if (mode == BPF_MEMSX) 17896 insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X, 17897 insn->dst_reg, insn->dst_reg, 17898 size * 8, 0); 17899 17900 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 17901 if (!new_prog) 17902 return -ENOMEM; 17903 17904 delta += cnt - 1; 17905 17906 /* keep walking new program and skip insns we just inserted */ 17907 env->prog = new_prog; 17908 insn = new_prog->insnsi + i + delta; 17909 } 17910 17911 return 0; 17912 } 17913 17914 static int jit_subprogs(struct bpf_verifier_env *env) 17915 { 17916 struct bpf_prog *prog = env->prog, **func, *tmp; 17917 int i, j, subprog_start, subprog_end = 0, len, subprog; 17918 struct bpf_map *map_ptr; 17919 struct bpf_insn *insn; 17920 void *old_bpf_func; 17921 int err, num_exentries; 17922 17923 if (env->subprog_cnt <= 1) 17924 return 0; 17925 17926 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 17927 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) 17928 continue; 17929 17930 /* Upon error here we cannot fall back to interpreter but 17931 * need a hard reject of the program. Thus -EFAULT is 17932 * propagated in any case. 17933 */ 17934 subprog = find_subprog(env, i + insn->imm + 1); 17935 if (subprog < 0) { 17936 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 17937 i + insn->imm + 1); 17938 return -EFAULT; 17939 } 17940 /* temporarily remember subprog id inside insn instead of 17941 * aux_data, since next loop will split up all insns into funcs 17942 */ 17943 insn->off = subprog; 17944 /* remember original imm in case JIT fails and fallback 17945 * to interpreter will be needed 17946 */ 17947 env->insn_aux_data[i].call_imm = insn->imm; 17948 /* point imm to __bpf_call_base+1 from JITs point of view */ 17949 insn->imm = 1; 17950 if (bpf_pseudo_func(insn)) 17951 /* jit (e.g. x86_64) may emit fewer instructions 17952 * if it learns a u32 imm is the same as a u64 imm. 
17953 * Force a non zero here. 17954 */ 17955 insn[1].imm = 1; 17956 } 17957 17958 err = bpf_prog_alloc_jited_linfo(prog); 17959 if (err) 17960 goto out_undo_insn; 17961 17962 err = -ENOMEM; 17963 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 17964 if (!func) 17965 goto out_undo_insn; 17966 17967 for (i = 0; i < env->subprog_cnt; i++) { 17968 subprog_start = subprog_end; 17969 subprog_end = env->subprog_info[i + 1].start; 17970 17971 len = subprog_end - subprog_start; 17972 /* bpf_prog_run() doesn't call subprogs directly, 17973 * hence main prog stats include the runtime of subprogs. 17974 * subprogs don't have IDs and not reachable via prog_get_next_id 17975 * func[i]->stats will never be accessed and stays NULL 17976 */ 17977 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 17978 if (!func[i]) 17979 goto out_free; 17980 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 17981 len * sizeof(struct bpf_insn)); 17982 func[i]->type = prog->type; 17983 func[i]->len = len; 17984 if (bpf_prog_calc_tag(func[i])) 17985 goto out_free; 17986 func[i]->is_func = 1; 17987 func[i]->aux->func_idx = i; 17988 /* Below members will be freed only at prog->aux */ 17989 func[i]->aux->btf = prog->aux->btf; 17990 func[i]->aux->func_info = prog->aux->func_info; 17991 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; 17992 func[i]->aux->poke_tab = prog->aux->poke_tab; 17993 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; 17994 17995 for (j = 0; j < prog->aux->size_poke_tab; j++) { 17996 struct bpf_jit_poke_descriptor *poke; 17997 17998 poke = &prog->aux->poke_tab[j]; 17999 if (poke->insn_idx < subprog_end && 18000 poke->insn_idx >= subprog_start) 18001 poke->aux = func[i]->aux; 18002 } 18003 18004 func[i]->aux->name[0] = 'F'; 18005 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; 18006 func[i]->jit_requested = 1; 18007 func[i]->blinding_requested = prog->blinding_requested; 18008 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; 18009 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; 18010 func[i]->aux->linfo = prog->aux->linfo; 18011 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 18012 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 18013 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 18014 num_exentries = 0; 18015 insn = func[i]->insnsi; 18016 for (j = 0; j < func[i]->len; j++, insn++) { 18017 if (BPF_CLASS(insn->code) == BPF_LDX && 18018 (BPF_MODE(insn->code) == BPF_PROBE_MEM || 18019 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) 18020 num_exentries++; 18021 } 18022 func[i]->aux->num_exentries = num_exentries; 18023 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; 18024 func[i] = bpf_int_jit_compile(func[i]); 18025 if (!func[i]->jited) { 18026 err = -ENOTSUPP; 18027 goto out_free; 18028 } 18029 cond_resched(); 18030 } 18031 18032 /* at this point all bpf functions were successfully JITed 18033 * now populate all bpf_calls with correct addresses and 18034 * run last pass of JIT 18035 */ 18036 for (i = 0; i < env->subprog_cnt; i++) { 18037 insn = func[i]->insnsi; 18038 for (j = 0; j < func[i]->len; j++, insn++) { 18039 if (bpf_pseudo_func(insn)) { 18040 subprog = insn->off; 18041 insn[0].imm = (u32)(long)func[subprog]->bpf_func; 18042 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; 18043 continue; 18044 } 18045 if (!bpf_pseudo_call(insn)) 18046 continue; 18047 subprog = insn->off; 18048 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); 18049 } 18050 18051 /* we use the aux data to keep a list of the 
start addresses 18052 * of the JITed images for each function in the program 18053 * 18054 * for some architectures, such as powerpc64, the imm field 18055 * might not be large enough to hold the offset of the start 18056 * address of the callee's JITed image from __bpf_call_base 18057 * 18058 * in such cases, we can lookup the start address of a callee 18059 * by using its subprog id, available from the off field of 18060 * the call instruction, as an index for this list 18061 */ 18062 func[i]->aux->func = func; 18063 func[i]->aux->func_cnt = env->subprog_cnt; 18064 } 18065 for (i = 0; i < env->subprog_cnt; i++) { 18066 old_bpf_func = func[i]->bpf_func; 18067 tmp = bpf_int_jit_compile(func[i]); 18068 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 18069 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 18070 err = -ENOTSUPP; 18071 goto out_free; 18072 } 18073 cond_resched(); 18074 } 18075 18076 /* finally lock prog and jit images for all functions and 18077 * populate kallsyms. Begin at the first subprogram, since 18078 * bpf_prog_load will add the kallsyms for the main program. 18079 */ 18080 for (i = 1; i < env->subprog_cnt; i++) { 18081 bpf_prog_lock_ro(func[i]); 18082 bpf_prog_kallsyms_add(func[i]); 18083 } 18084 18085 /* Last step: make now unused interpreter insns from main 18086 * prog consistent for later dump requests, so they can 18087 * later look the same as if they were interpreted only. 18088 */ 18089 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18090 if (bpf_pseudo_func(insn)) { 18091 insn[0].imm = env->insn_aux_data[i].call_imm; 18092 insn[1].imm = insn->off; 18093 insn->off = 0; 18094 continue; 18095 } 18096 if (!bpf_pseudo_call(insn)) 18097 continue; 18098 insn->off = env->insn_aux_data[i].call_imm; 18099 subprog = find_subprog(env, i + insn->off + 1); 18100 insn->imm = subprog; 18101 } 18102 18103 prog->jited = 1; 18104 prog->bpf_func = func[0]->bpf_func; 18105 prog->jited_len = func[0]->jited_len; 18106 prog->aux->extable = func[0]->aux->extable; 18107 prog->aux->num_exentries = func[0]->aux->num_exentries; 18108 prog->aux->func = func; 18109 prog->aux->func_cnt = env->subprog_cnt; 18110 bpf_prog_jit_attempt_done(prog); 18111 return 0; 18112 out_free: 18113 /* We failed JIT'ing, so at this point we need to unregister poke 18114 * descriptors from subprogs, so that the kernel is not attempting to 18115 * patch them anymore as we're freeing the subprog JIT memory. 18116 */ 18117 for (i = 0; i < prog->aux->size_poke_tab; i++) { 18118 map_ptr = prog->aux->poke_tab[i].tail_call.map; 18119 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); 18120 } 18121 /* At this point we're guaranteed that poke descriptors are not 18122 * live anymore. We can just unlink the descriptor table, as it's 18123 * released with the main prog. 
18124 */ 18125 for (i = 0; i < env->subprog_cnt; i++) { 18126 if (!func[i]) 18127 continue; 18128 func[i]->aux->poke_tab = NULL; 18129 bpf_jit_free(func[i]); 18130 } 18131 kfree(func); 18132 out_undo_insn: 18133 /* cleanup main prog to be interpreted */ 18134 prog->jit_requested = 0; 18135 prog->blinding_requested = 0; 18136 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18137 if (!bpf_pseudo_call(insn)) 18138 continue; 18139 insn->off = 0; 18140 insn->imm = env->insn_aux_data[i].call_imm; 18141 } 18142 bpf_prog_jit_attempt_done(prog); 18143 return err; 18144 } 18145 18146 static int fixup_call_args(struct bpf_verifier_env *env) 18147 { 18148 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 18149 struct bpf_prog *prog = env->prog; 18150 struct bpf_insn *insn = prog->insnsi; 18151 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); 18152 int i, depth; 18153 #endif 18154 int err = 0; 18155 18156 if (env->prog->jit_requested && 18157 !bpf_prog_is_offloaded(env->prog->aux)) { 18158 err = jit_subprogs(env); 18159 if (err == 0) 18160 return 0; 18161 if (err == -EFAULT) 18162 return err; 18163 } 18164 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 18165 if (has_kfunc_call) { 18166 verbose(env, "calling kernel functions is not allowed in non-JITed programs\n"); 18167 return -EINVAL; 18168 } 18169 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { 18170 /* When the JIT fails, progs with bpf2bpf calls and tail_calls 18171 * have to be rejected, since the interpreter doesn't support them yet. 18172 */ 18173 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 18174 return -EINVAL; 18175 } 18176 for (i = 0; i < prog->len; i++, insn++) { 18177 if (bpf_pseudo_func(insn)) { 18178 /* When the JIT fails, progs with callback calls 18179 * have to be rejected, since the interpreter doesn't support them yet. 
18180 */ 18181 verbose(env, "callbacks are not allowed in non-JITed programs\n"); 18182 return -EINVAL; 18183 } 18184 18185 if (!bpf_pseudo_call(insn)) 18186 continue; 18187 depth = get_callee_stack_depth(env, insn, i); 18188 if (depth < 0) 18189 return depth; 18190 bpf_patch_call_args(insn, depth); 18191 } 18192 err = 0; 18193 #endif 18194 return err; 18195 } 18196 18197 /* replace a generic kfunc with a specialized version if necessary */ 18198 static void specialize_kfunc(struct bpf_verifier_env *env, 18199 u32 func_id, u16 offset, unsigned long *addr) 18200 { 18201 struct bpf_prog *prog = env->prog; 18202 bool seen_direct_write; 18203 void *xdp_kfunc; 18204 bool is_rdonly; 18205 18206 if (bpf_dev_bound_kfunc_id(func_id)) { 18207 xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id); 18208 if (xdp_kfunc) { 18209 *addr = (unsigned long)xdp_kfunc; 18210 return; 18211 } 18212 /* fallback to default kfunc when not supported by netdev */ 18213 } 18214 18215 if (offset) 18216 return; 18217 18218 if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { 18219 seen_direct_write = env->seen_direct_write; 18220 is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE); 18221 18222 if (is_rdonly) 18223 *addr = (unsigned long)bpf_dynptr_from_skb_rdonly; 18224 18225 /* restore env->seen_direct_write to its original value, since 18226 * may_access_direct_pkt_data mutates it 18227 */ 18228 env->seen_direct_write = seen_direct_write; 18229 } 18230 } 18231 18232 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux, 18233 u16 struct_meta_reg, 18234 u16 node_offset_reg, 18235 struct bpf_insn *insn, 18236 struct bpf_insn *insn_buf, 18237 int *cnt) 18238 { 18239 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; 18240 struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) }; 18241 18242 insn_buf[0] = addr[0]; 18243 insn_buf[1] = addr[1]; 18244 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); 18245 insn_buf[3] = *insn; 18246 *cnt = 4; 18247 } 18248 18249 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 18250 struct bpf_insn *insn_buf, int insn_idx, int *cnt) 18251 { 18252 const struct bpf_kfunc_desc *desc; 18253 18254 if (!insn->imm) { 18255 verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); 18256 return -EINVAL; 18257 } 18258 18259 *cnt = 0; 18260 18261 /* insn->imm has the btf func_id. Replace it with an offset relative to 18262 * __bpf_call_base, unless the JIT needs to call functions that are 18263 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()). 
18264 */ 18265 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); 18266 if (!desc) { 18267 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", 18268 insn->imm); 18269 return -EFAULT; 18270 } 18271 18272 if (!bpf_jit_supports_far_kfunc_call()) 18273 insn->imm = BPF_CALL_IMM(desc->addr); 18274 if (insn->off) 18275 return 0; 18276 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) { 18277 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 18278 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; 18279 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; 18280 18281 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size); 18282 insn_buf[1] = addr[0]; 18283 insn_buf[2] = addr[1]; 18284 insn_buf[3] = *insn; 18285 *cnt = 4; 18286 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || 18287 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { 18288 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 18289 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; 18290 18291 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && 18292 !kptr_struct_meta) { 18293 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", 18294 insn_idx); 18295 return -EFAULT; 18296 } 18297 18298 insn_buf[0] = addr[0]; 18299 insn_buf[1] = addr[1]; 18300 insn_buf[2] = *insn; 18301 *cnt = 3; 18302 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 18303 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 18304 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 18305 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 18306 int struct_meta_reg = BPF_REG_3; 18307 int node_offset_reg = BPF_REG_4; 18308 18309 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ 18310 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 18311 struct_meta_reg = BPF_REG_4; 18312 node_offset_reg = BPF_REG_5; 18313 } 18314 18315 if (!kptr_struct_meta) { 18316 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", 18317 insn_idx); 18318 return -EFAULT; 18319 } 18320 18321 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, 18322 node_offset_reg, insn, insn_buf, cnt); 18323 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || 18324 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { 18325 insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); 18326 *cnt = 1; 18327 } 18328 return 0; 18329 } 18330 18331 /* Do various post-verification rewrites in a single program pass. 18332 * These rewrites simplify JIT and interpreter implementations. 
18333 */ 18334 static int do_misc_fixups(struct bpf_verifier_env *env) 18335 { 18336 struct bpf_prog *prog = env->prog; 18337 enum bpf_attach_type eatype = prog->expected_attach_type; 18338 enum bpf_prog_type prog_type = resolve_prog_type(prog); 18339 struct bpf_insn *insn = prog->insnsi; 18340 const struct bpf_func_proto *fn; 18341 const int insn_cnt = prog->len; 18342 const struct bpf_map_ops *ops; 18343 struct bpf_insn_aux_data *aux; 18344 struct bpf_insn insn_buf[16]; 18345 struct bpf_prog *new_prog; 18346 struct bpf_map *map_ptr; 18347 int i, ret, cnt, delta = 0; 18348 18349 for (i = 0; i < insn_cnt; i++, insn++) { 18350 /* Make divide-by-zero exceptions impossible. */ 18351 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 18352 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 18353 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 18354 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 18355 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 18356 bool isdiv = BPF_OP(insn->code) == BPF_DIV; 18357 struct bpf_insn *patchlet; 18358 struct bpf_insn chk_and_div[] = { 18359 /* [R,W]x div 0 -> 0 */ 18360 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 18361 BPF_JNE | BPF_K, insn->src_reg, 18362 0, 2, 0), 18363 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 18364 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 18365 *insn, 18366 }; 18367 struct bpf_insn chk_and_mod[] = { 18368 /* [R,W]x mod 0 -> [R,W]x */ 18369 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 18370 BPF_JEQ | BPF_K, insn->src_reg, 18371 0, 1 + (is64 ? 0 : 1), 0), 18372 *insn, 18373 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 18374 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), 18375 }; 18376 18377 patchlet = isdiv ? chk_and_div : chk_and_mod; 18378 cnt = isdiv ? ARRAY_SIZE(chk_and_div) : 18379 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); 18380 18381 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 18382 if (!new_prog) 18383 return -ENOMEM; 18384 18385 delta += cnt - 1; 18386 env->prog = prog = new_prog; 18387 insn = new_prog->insnsi + i + delta; 18388 continue; 18389 } 18390 18391 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */ 18392 if (BPF_CLASS(insn->code) == BPF_LD && 18393 (BPF_MODE(insn->code) == BPF_ABS || 18394 BPF_MODE(insn->code) == BPF_IND)) { 18395 cnt = env->ops->gen_ld_abs(insn, insn_buf); 18396 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 18397 verbose(env, "bpf verifier is misconfigured\n"); 18398 return -EINVAL; 18399 } 18400 18401 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18402 if (!new_prog) 18403 return -ENOMEM; 18404 18405 delta += cnt - 1; 18406 env->prog = prog = new_prog; 18407 insn = new_prog->insnsi + i + delta; 18408 continue; 18409 } 18410 18411 /* Rewrite pointer arithmetic to mitigate speculation attacks. */ 18412 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 18413 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 18414 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 18415 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 18416 struct bpf_insn *patch = &insn_buf[0]; 18417 bool issrc, isneg, isimm; 18418 u32 off_reg; 18419 18420 aux = &env->insn_aux_data[i + delta]; 18421 if (!aux->alu_state || 18422 aux->alu_state == BPF_ALU_NON_POINTER) 18423 continue; 18424 18425 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 18426 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 18427 BPF_ALU_SANITIZE_SRC; 18428 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; 18429 18430 off_reg = issrc ? 
insn->src_reg : insn->dst_reg; 18431 if (isimm) { 18432 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 18433 } else { 18434 if (isneg) 18435 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 18436 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 18437 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 18438 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 18439 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 18440 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 18441 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); 18442 } 18443 if (!issrc) 18444 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); 18445 insn->src_reg = BPF_REG_AX; 18446 if (isneg) 18447 insn->code = insn->code == code_add ? 18448 code_sub : code_add; 18449 *patch++ = *insn; 18450 if (issrc && isneg && !isimm) 18451 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 18452 cnt = patch - insn_buf; 18453 18454 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18455 if (!new_prog) 18456 return -ENOMEM; 18457 18458 delta += cnt - 1; 18459 env->prog = prog = new_prog; 18460 insn = new_prog->insnsi + i + delta; 18461 continue; 18462 } 18463 18464 if (insn->code != (BPF_JMP | BPF_CALL)) 18465 continue; 18466 if (insn->src_reg == BPF_PSEUDO_CALL) 18467 continue; 18468 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 18469 ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt); 18470 if (ret) 18471 return ret; 18472 if (cnt == 0) 18473 continue; 18474 18475 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18476 if (!new_prog) 18477 return -ENOMEM; 18478 18479 delta += cnt - 1; 18480 env->prog = prog = new_prog; 18481 insn = new_prog->insnsi + i + delta; 18482 continue; 18483 } 18484 18485 if (insn->imm == BPF_FUNC_get_route_realm) 18486 prog->dst_needed = 1; 18487 if (insn->imm == BPF_FUNC_get_prandom_u32) 18488 bpf_user_rnd_init_once(); 18489 if (insn->imm == BPF_FUNC_override_return) 18490 prog->kprobe_override = 1; 18491 if (insn->imm == BPF_FUNC_tail_call) { 18492 /* If we tail call into other programs, we 18493 * cannot make any assumptions since they can 18494 * be replaced dynamically during runtime in 18495 * the program array. 
18496 */ 18497 prog->cb_access = 1; 18498 if (!allow_tail_call_in_subprogs(env)) 18499 prog->aux->stack_depth = MAX_BPF_STACK; 18500 prog->aux->max_pkt_offset = MAX_PACKET_OFF; 18501 18502 /* mark bpf_tail_call as different opcode to avoid 18503 * conditional branch in the interpreter for every normal 18504 * call and to prevent accidental JITing by JIT compiler 18505 * that doesn't support bpf_tail_call yet 18506 */ 18507 insn->imm = 0; 18508 insn->code = BPF_JMP | BPF_TAIL_CALL; 18509 18510 aux = &env->insn_aux_data[i + delta]; 18511 if (env->bpf_capable && !prog->blinding_requested && 18512 prog->jit_requested && 18513 !bpf_map_key_poisoned(aux) && 18514 !bpf_map_ptr_poisoned(aux) && 18515 !bpf_map_ptr_unpriv(aux)) { 18516 struct bpf_jit_poke_descriptor desc = { 18517 .reason = BPF_POKE_REASON_TAIL_CALL, 18518 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 18519 .tail_call.key = bpf_map_key_immediate(aux), 18520 .insn_idx = i + delta, 18521 }; 18522 18523 ret = bpf_jit_add_poke_descriptor(prog, &desc); 18524 if (ret < 0) { 18525 verbose(env, "adding tail call poke descriptor failed\n"); 18526 return ret; 18527 } 18528 18529 insn->imm = ret + 1; 18530 continue; 18531 } 18532 18533 if (!bpf_map_ptr_unpriv(aux)) 18534 continue; 18535 18536 /* instead of changing every JIT dealing with tail_call 18537 * emit two extra insns: 18538 * if (index >= max_entries) goto out; 18539 * index &= array->index_mask; 18540 * to avoid out-of-bounds cpu speculation 18541 */ 18542 if (bpf_map_ptr_poisoned(aux)) { 18543 verbose(env, "tail_call abusing map_ptr\n"); 18544 return -EINVAL; 18545 } 18546 18547 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 18548 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 18549 map_ptr->max_entries, 2); 18550 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 18551 container_of(map_ptr, 18552 struct bpf_array, 18553 map)->index_mask); 18554 insn_buf[2] = *insn; 18555 cnt = 3; 18556 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18557 if (!new_prog) 18558 return -ENOMEM; 18559 18560 delta += cnt - 1; 18561 env->prog = prog = new_prog; 18562 insn = new_prog->insnsi + i + delta; 18563 continue; 18564 } 18565 18566 if (insn->imm == BPF_FUNC_timer_set_callback) { 18567 /* The verifier will process callback_fn as many times as necessary 18568 * with different maps and the register states prepared by 18569 * set_timer_callback_state will be accurate. 18570 * 18571 * The following use case is valid: 18572 * map1 is shared by prog1, prog2, prog3. 18573 * prog1 calls bpf_timer_init for some map1 elements 18574 * prog2 calls bpf_timer_set_callback for some map1 elements. 18575 * Those that were not bpf_timer_init-ed will return -EINVAL. 18576 * prog3 calls bpf_timer_start for some map1 elements. 18577 * Those that were not both bpf_timer_init-ed and 18578 * bpf_timer_set_callback-ed will return -EINVAL. 
18579 */ 18580 struct bpf_insn ld_addrs[2] = { 18581 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), 18582 }; 18583 18584 insn_buf[0] = ld_addrs[0]; 18585 insn_buf[1] = ld_addrs[1]; 18586 insn_buf[2] = *insn; 18587 cnt = 3; 18588 18589 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18590 if (!new_prog) 18591 return -ENOMEM; 18592 18593 delta += cnt - 1; 18594 env->prog = prog = new_prog; 18595 insn = new_prog->insnsi + i + delta; 18596 goto patch_call_imm; 18597 } 18598 18599 if (is_storage_get_function(insn->imm)) { 18600 if (!env->prog->aux->sleepable || 18601 env->insn_aux_data[i + delta].storage_get_func_atomic) 18602 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); 18603 else 18604 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); 18605 insn_buf[1] = *insn; 18606 cnt = 2; 18607 18608 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18609 if (!new_prog) 18610 return -ENOMEM; 18611 18612 delta += cnt - 1; 18613 env->prog = prog = new_prog; 18614 insn = new_prog->insnsi + i + delta; 18615 goto patch_call_imm; 18616 } 18617 18618 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 18619 * and other inlining handlers are currently limited to 64 bit 18620 * only. 18621 */ 18622 if (prog->jit_requested && BITS_PER_LONG == 64 && 18623 (insn->imm == BPF_FUNC_map_lookup_elem || 18624 insn->imm == BPF_FUNC_map_update_elem || 18625 insn->imm == BPF_FUNC_map_delete_elem || 18626 insn->imm == BPF_FUNC_map_push_elem || 18627 insn->imm == BPF_FUNC_map_pop_elem || 18628 insn->imm == BPF_FUNC_map_peek_elem || 18629 insn->imm == BPF_FUNC_redirect_map || 18630 insn->imm == BPF_FUNC_for_each_map_elem || 18631 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { 18632 aux = &env->insn_aux_data[i + delta]; 18633 if (bpf_map_ptr_poisoned(aux)) 18634 goto patch_call_imm; 18635 18636 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 18637 ops = map_ptr->ops; 18638 if (insn->imm == BPF_FUNC_map_lookup_elem && 18639 ops->map_gen_lookup) { 18640 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 18641 if (cnt == -EOPNOTSUPP) 18642 goto patch_map_ops_generic; 18643 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 18644 verbose(env, "bpf verifier is misconfigured\n"); 18645 return -EINVAL; 18646 } 18647 18648 new_prog = bpf_patch_insn_data(env, i + delta, 18649 insn_buf, cnt); 18650 if (!new_prog) 18651 return -ENOMEM; 18652 18653 delta += cnt - 1; 18654 env->prog = prog = new_prog; 18655 insn = new_prog->insnsi + i + delta; 18656 continue; 18657 } 18658 18659 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 18660 (void *(*)(struct bpf_map *map, void *key))NULL)); 18661 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 18662 (long (*)(struct bpf_map *map, void *key))NULL)); 18663 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 18664 (long (*)(struct bpf_map *map, void *key, void *value, 18665 u64 flags))NULL)); 18666 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 18667 (long (*)(struct bpf_map *map, void *value, 18668 u64 flags))NULL)); 18669 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 18670 (long (*)(struct bpf_map *map, void *value))NULL)); 18671 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 18672 (long (*)(struct bpf_map *map, void *value))NULL)); 18673 BUILD_BUG_ON(!__same_type(ops->map_redirect, 18674 (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL)); 18675 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, 18676 (long (*)(struct bpf_map *map, 18677 bpf_callback_t callback_fn, 18678 void *callback_ctx, 18679 u64 flags))NULL)); 18680 
BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, 18681 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL)); 18682 18683 patch_map_ops_generic: 18684 switch (insn->imm) { 18685 case BPF_FUNC_map_lookup_elem: 18686 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); 18687 continue; 18688 case BPF_FUNC_map_update_elem: 18689 insn->imm = BPF_CALL_IMM(ops->map_update_elem); 18690 continue; 18691 case BPF_FUNC_map_delete_elem: 18692 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); 18693 continue; 18694 case BPF_FUNC_map_push_elem: 18695 insn->imm = BPF_CALL_IMM(ops->map_push_elem); 18696 continue; 18697 case BPF_FUNC_map_pop_elem: 18698 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); 18699 continue; 18700 case BPF_FUNC_map_peek_elem: 18701 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); 18702 continue; 18703 case BPF_FUNC_redirect_map: 18704 insn->imm = BPF_CALL_IMM(ops->map_redirect); 18705 continue; 18706 case BPF_FUNC_for_each_map_elem: 18707 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); 18708 continue; 18709 case BPF_FUNC_map_lookup_percpu_elem: 18710 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); 18711 continue; 18712 } 18713 18714 goto patch_call_imm; 18715 } 18716 18717 /* Implement bpf_jiffies64 inline. */ 18718 if (prog->jit_requested && BITS_PER_LONG == 64 && 18719 insn->imm == BPF_FUNC_jiffies64) { 18720 struct bpf_insn ld_jiffies_addr[2] = { 18721 BPF_LD_IMM64(BPF_REG_0, 18722 (unsigned long)&jiffies), 18723 }; 18724 18725 insn_buf[0] = ld_jiffies_addr[0]; 18726 insn_buf[1] = ld_jiffies_addr[1]; 18727 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 18728 BPF_REG_0, 0); 18729 cnt = 3; 18730 18731 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 18732 cnt); 18733 if (!new_prog) 18734 return -ENOMEM; 18735 18736 delta += cnt - 1; 18737 env->prog = prog = new_prog; 18738 insn = new_prog->insnsi + i + delta; 18739 continue; 18740 } 18741 18742 /* Implement bpf_get_func_arg inline. */ 18743 if (prog_type == BPF_PROG_TYPE_TRACING && 18744 insn->imm == BPF_FUNC_get_func_arg) { 18745 /* Load nr_args from ctx - 8 */ 18746 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 18747 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6); 18748 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3); 18749 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1); 18750 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0); 18751 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 18752 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0); 18753 insn_buf[7] = BPF_JMP_A(1); 18754 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); 18755 cnt = 9; 18756 18757 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18758 if (!new_prog) 18759 return -ENOMEM; 18760 18761 delta += cnt - 1; 18762 env->prog = prog = new_prog; 18763 insn = new_prog->insnsi + i + delta; 18764 continue; 18765 } 18766 18767 /* Implement bpf_get_func_ret inline. 
*/ 18768 if (prog_type == BPF_PROG_TYPE_TRACING && 18769 insn->imm == BPF_FUNC_get_func_ret) { 18770 if (eatype == BPF_TRACE_FEXIT || 18771 eatype == BPF_MODIFY_RETURN) { 18772 /* Load nr_args from ctx - 8 */ 18773 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 18774 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); 18775 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); 18776 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 18777 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0); 18778 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0); 18779 cnt = 6; 18780 } else { 18781 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); 18782 cnt = 1; 18783 } 18784 18785 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18786 if (!new_prog) 18787 return -ENOMEM; 18788 18789 delta += cnt - 1; 18790 env->prog = prog = new_prog; 18791 insn = new_prog->insnsi + i + delta; 18792 continue; 18793 } 18794 18795 /* Implement get_func_arg_cnt inline. */ 18796 if (prog_type == BPF_PROG_TYPE_TRACING && 18797 insn->imm == BPF_FUNC_get_func_arg_cnt) { 18798 /* Load nr_args from ctx - 8 */ 18799 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 18800 18801 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 18802 if (!new_prog) 18803 return -ENOMEM; 18804 18805 env->prog = prog = new_prog; 18806 insn = new_prog->insnsi + i + delta; 18807 continue; 18808 } 18809 18810 /* Implement bpf_get_func_ip inline. */ 18811 if (prog_type == BPF_PROG_TYPE_TRACING && 18812 insn->imm == BPF_FUNC_get_func_ip) { 18813 /* Load IP address from ctx - 16 */ 18814 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); 18815 18816 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 18817 if (!new_prog) 18818 return -ENOMEM; 18819 18820 env->prog = prog = new_prog; 18821 insn = new_prog->insnsi + i + delta; 18822 continue; 18823 } 18824 18825 patch_call_imm: 18826 fn = env->ops->get_func_proto(insn->imm, env->prog); 18827 /* all functions that have prototype and verifier allowed 18828 * programs to call them, must be real in-kernel functions 18829 */ 18830 if (!fn->func) { 18831 verbose(env, 18832 "kernel subsystem misconfigured func %s#%d\n", 18833 func_id_name(insn->imm), insn->imm); 18834 return -EFAULT; 18835 } 18836 insn->imm = fn->func - __bpf_call_base; 18837 } 18838 18839 /* Since poke tab is now finalized, publish aux to tracker. 
*/ 18840 for (i = 0; i < prog->aux->size_poke_tab; i++) { 18841 map_ptr = prog->aux->poke_tab[i].tail_call.map; 18842 if (!map_ptr->ops->map_poke_track || 18843 !map_ptr->ops->map_poke_untrack || 18844 !map_ptr->ops->map_poke_run) { 18845 verbose(env, "bpf verifier is misconfigured\n"); 18846 return -EINVAL; 18847 } 18848 18849 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 18850 if (ret < 0) { 18851 verbose(env, "tracking tail call prog failed\n"); 18852 return ret; 18853 } 18854 } 18855 18856 sort_kfunc_descs_by_imm_off(env->prog); 18857 18858 return 0; 18859 } 18860 18861 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, 18862 int position, 18863 s32 stack_base, 18864 u32 callback_subprogno, 18865 u32 *cnt) 18866 { 18867 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; 18868 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; 18869 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; 18870 int reg_loop_max = BPF_REG_6; 18871 int reg_loop_cnt = BPF_REG_7; 18872 int reg_loop_ctx = BPF_REG_8; 18873 18874 struct bpf_prog *new_prog; 18875 u32 callback_start; 18876 u32 call_insn_offset; 18877 s32 callback_offset; 18878 18879 /* This represents an inlined version of bpf_iter.c:bpf_loop, 18880 * be careful to modify this code in sync. 18881 */ 18882 struct bpf_insn insn_buf[] = { 18883 /* Return error and jump to the end of the patch if 18884 * expected number of iterations is too big. 18885 */ 18886 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2), 18887 BPF_MOV32_IMM(BPF_REG_0, -E2BIG), 18888 BPF_JMP_IMM(BPF_JA, 0, 0, 16), 18889 /* spill R6, R7, R8 to use these as loop vars */ 18890 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset), 18891 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset), 18892 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset), 18893 /* initialize loop vars */ 18894 BPF_MOV64_REG(reg_loop_max, BPF_REG_1), 18895 BPF_MOV32_IMM(reg_loop_cnt, 0), 18896 BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3), 18897 /* loop header, 18898 * if reg_loop_cnt >= reg_loop_max skip the loop body 18899 */ 18900 BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5), 18901 /* callback call, 18902 * correct callback offset would be set after patching 18903 */ 18904 BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt), 18905 BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx), 18906 BPF_CALL_REL(0), 18907 /* increment loop counter */ 18908 BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1), 18909 /* jump to loop header if callback returned 0 */ 18910 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6), 18911 /* return value of bpf_loop, 18912 * set R0 to the number of iterations 18913 */ 18914 BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt), 18915 /* restore original values of R6, R7, R8 */ 18916 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset), 18917 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset), 18918 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset), 18919 }; 18920 18921 *cnt = ARRAY_SIZE(insn_buf); 18922 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt); 18923 if (!new_prog) 18924 return new_prog; 18925 18926 /* callback start is known only after patching */ 18927 callback_start = env->subprog_info[callback_subprogno].start; 18928 /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */ 18929 call_insn_offset = position + 12; 18930 callback_offset = callback_start - call_insn_offset - 1; 18931 new_prog->insnsi[call_insn_offset].imm = callback_offset; 18932 18933 return new_prog; 18934 } 18935 18936 static bool is_bpf_loop_call(struct bpf_insn *insn) 18937 { 18938 return insn->code == (BPF_JMP | 
BPF_CALL) && 18939 insn->src_reg == 0 && 18940 insn->imm == BPF_FUNC_loop; 18941 } 18942 18943 /* For all sub-programs in the program (including main) check 18944 * insn_aux_data to see if there are bpf_loop calls that require 18945 * inlining. If such calls are found the calls are replaced with a 18946 * sequence of instructions produced by `inline_bpf_loop` function and 18947 * subprog stack_depth is increased by the size of 3 registers. 18948 * This stack space is used to spill values of the R6, R7, R8. These 18949 * registers are used to store the loop bound, counter and context 18950 * variables. 18951 */ 18952 static int optimize_bpf_loop(struct bpf_verifier_env *env) 18953 { 18954 struct bpf_subprog_info *subprogs = env->subprog_info; 18955 int i, cur_subprog = 0, cnt, delta = 0; 18956 struct bpf_insn *insn = env->prog->insnsi; 18957 int insn_cnt = env->prog->len; 18958 u16 stack_depth = subprogs[cur_subprog].stack_depth; 18959 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 18960 u16 stack_depth_extra = 0; 18961 18962 for (i = 0; i < insn_cnt; i++, insn++) { 18963 struct bpf_loop_inline_state *inline_state = 18964 &env->insn_aux_data[i + delta].loop_inline_state; 18965 18966 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { 18967 struct bpf_prog *new_prog; 18968 18969 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; 18970 new_prog = inline_bpf_loop(env, 18971 i + delta, 18972 -(stack_depth + stack_depth_extra), 18973 inline_state->callback_subprogno, 18974 &cnt); 18975 if (!new_prog) 18976 return -ENOMEM; 18977 18978 delta += cnt - 1; 18979 env->prog = new_prog; 18980 insn = new_prog->insnsi + i + delta; 18981 } 18982 18983 if (subprogs[cur_subprog + 1].start == i + delta + 1) { 18984 subprogs[cur_subprog].stack_depth += stack_depth_extra; 18985 cur_subprog++; 18986 stack_depth = subprogs[cur_subprog].stack_depth; 18987 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 18988 stack_depth_extra = 0; 18989 } 18990 } 18991 18992 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 18993 18994 return 0; 18995 } 18996 18997 static void free_states(struct bpf_verifier_env *env) 18998 { 18999 struct bpf_verifier_state_list *sl, *sln; 19000 int i; 19001 19002 sl = env->free_list; 19003 while (sl) { 19004 sln = sl->next; 19005 free_verifier_state(&sl->state, false); 19006 kfree(sl); 19007 sl = sln; 19008 } 19009 env->free_list = NULL; 19010 19011 if (!env->explored_states) 19012 return; 19013 19014 for (i = 0; i < state_htab_size(env); i++) { 19015 sl = env->explored_states[i]; 19016 19017 while (sl) { 19018 sln = sl->next; 19019 free_verifier_state(&sl->state, false); 19020 kfree(sl); 19021 sl = sln; 19022 } 19023 env->explored_states[i] = NULL; 19024 } 19025 } 19026 19027 static int do_check_common(struct bpf_verifier_env *env, int subprog) 19028 { 19029 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 19030 struct bpf_verifier_state *state; 19031 struct bpf_reg_state *regs; 19032 int ret, i; 19033 19034 env->prev_linfo = NULL; 19035 env->pass_cnt++; 19036 19037 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 19038 if (!state) 19039 return -ENOMEM; 19040 state->curframe = 0; 19041 state->speculative = false; 19042 state->branches = 1; 19043 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 19044 if (!state->frame[0]) { 19045 kfree(state); 19046 return -ENOMEM; 19047 } 19048 env->cur_state = state; 19049 init_func_state(env, state->frame[0], 19050 BPF_MAIN_FUNC /* callsite */, 19051 0 /* 
frameno */, 19052 subprog); 19053 state->first_insn_idx = env->subprog_info[subprog].start; 19054 state->last_insn_idx = -1; 19055 19056 regs = state->frame[state->curframe]->regs; 19057 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 19058 ret = btf_prepare_func_args(env, subprog, regs); 19059 if (ret) 19060 goto out; 19061 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 19062 if (regs[i].type == PTR_TO_CTX) 19063 mark_reg_known_zero(env, regs, i); 19064 else if (regs[i].type == SCALAR_VALUE) 19065 mark_reg_unknown(env, regs, i); 19066 else if (base_type(regs[i].type) == PTR_TO_MEM) { 19067 const u32 mem_size = regs[i].mem_size; 19068 19069 mark_reg_known_zero(env, regs, i); 19070 regs[i].mem_size = mem_size; 19071 regs[i].id = ++env->id_gen; 19072 } 19073 } 19074 } else { 19075 /* 1st arg to a function */ 19076 regs[BPF_REG_1].type = PTR_TO_CTX; 19077 mark_reg_known_zero(env, regs, BPF_REG_1); 19078 ret = btf_check_subprog_arg_match(env, subprog, regs); 19079 if (ret == -EFAULT) 19080 /* unlikely verifier bug. abort. 19081 * ret == 0 and ret < 0 are sadly acceptable for 19082 * main() function due to backward compatibility. 19083 * Like socket filter program may be written as: 19084 * int bpf_prog(struct pt_regs *ctx) 19085 * and never dereference that ctx in the program. 19086 * 'struct pt_regs' is a type mismatch for socket 19087 * filter that should be using 'struct __sk_buff'. 19088 */ 19089 goto out; 19090 } 19091 19092 ret = do_check(env); 19093 out: 19094 /* check for NULL is necessary, since cur_state can be freed inside 19095 * do_check() under memory pressure. 19096 */ 19097 if (env->cur_state) { 19098 free_verifier_state(env->cur_state, true); 19099 env->cur_state = NULL; 19100 } 19101 while (!pop_stack(env, NULL, NULL, false)); 19102 if (!ret && pop_log) 19103 bpf_vlog_reset(&env->log, 0); 19104 free_states(env); 19105 return ret; 19106 } 19107 19108 /* Verify all global functions in a BPF program one by one based on their BTF. 19109 * All global functions must pass verification. Otherwise the whole program is rejected. 19110 * Consider: 19111 * int bar(int); 19112 * int foo(int f) 19113 * { 19114 * return bar(f); 19115 * } 19116 * int bar(int b) 19117 * { 19118 * ... 19119 * } 19120 * foo() will be verified first for R1=any_scalar_value. During verification it 19121 * will be assumed that bar() already verified successfully and call to bar() 19122 * from foo() will be checked for type match only. Later bar() will be verified 19123 * independently to check that it's safe for R1=any_scalar_value. 
19124 */ 19125 static int do_check_subprogs(struct bpf_verifier_env *env) 19126 { 19127 struct bpf_prog_aux *aux = env->prog->aux; 19128 int i, ret; 19129 19130 if (!aux->func_info) 19131 return 0; 19132 19133 for (i = 1; i < env->subprog_cnt; i++) { 19134 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 19135 continue; 19136 env->insn_idx = env->subprog_info[i].start; 19137 WARN_ON_ONCE(env->insn_idx == 0); 19138 ret = do_check_common(env, i); 19139 if (ret) { 19140 return ret; 19141 } else if (env->log.level & BPF_LOG_LEVEL) { 19142 verbose(env, 19143 "Func#%d is safe for any args that match its prototype\n", 19144 i); 19145 } 19146 } 19147 return 0; 19148 } 19149 19150 static int do_check_main(struct bpf_verifier_env *env) 19151 { 19152 int ret; 19153 19154 env->insn_idx = 0; 19155 ret = do_check_common(env, 0); 19156 if (!ret) 19157 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 19158 return ret; 19159 } 19160 19161 19162 static void print_verification_stats(struct bpf_verifier_env *env) 19163 { 19164 int i; 19165 19166 if (env->log.level & BPF_LOG_STATS) { 19167 verbose(env, "verification time %lld usec\n", 19168 div_u64(env->verification_time, 1000)); 19169 verbose(env, "stack depth "); 19170 for (i = 0; i < env->subprog_cnt; i++) { 19171 u32 depth = env->subprog_info[i].stack_depth; 19172 19173 verbose(env, "%d", depth); 19174 if (i + 1 < env->subprog_cnt) 19175 verbose(env, "+"); 19176 } 19177 verbose(env, "\n"); 19178 } 19179 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 19180 "total_states %d peak_states %d mark_read %d\n", 19181 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 19182 env->max_states_per_insn, env->total_states, 19183 env->peak_states, env->longest_mark_read_walk); 19184 } 19185 19186 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 19187 { 19188 const struct btf_type *t, *func_proto; 19189 const struct bpf_struct_ops *st_ops; 19190 const struct btf_member *member; 19191 struct bpf_prog *prog = env->prog; 19192 u32 btf_id, member_idx; 19193 const char *mname; 19194 19195 if (!prog->gpl_compatible) { 19196 verbose(env, "struct ops programs must have a GPL compatible license\n"); 19197 return -EINVAL; 19198 } 19199 19200 btf_id = prog->aux->attach_btf_id; 19201 st_ops = bpf_struct_ops_find(btf_id); 19202 if (!st_ops) { 19203 verbose(env, "attach_btf_id %u is not a supported struct\n", 19204 btf_id); 19205 return -ENOTSUPP; 19206 } 19207 19208 t = st_ops->type; 19209 member_idx = prog->expected_attach_type; 19210 if (member_idx >= btf_type_vlen(t)) { 19211 verbose(env, "attach to invalid member idx %u of struct %s\n", 19212 member_idx, st_ops->name); 19213 return -EINVAL; 19214 } 19215 19216 member = &btf_type_member(t)[member_idx]; 19217 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 19218 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 19219 NULL); 19220 if (!func_proto) { 19221 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 19222 mname, member_idx, st_ops->name); 19223 return -EINVAL; 19224 } 19225 19226 if (st_ops->check_member) { 19227 int err = st_ops->check_member(t, member, prog); 19228 19229 if (err) { 19230 verbose(env, "attach to unsupported member %s of struct %s\n", 19231 mname, st_ops->name); 19232 return err; 19233 } 19234 } 19235 19236 prog->aux->attach_func_proto = func_proto; 19237 prog->aux->attach_func_name = mname; 19238 env->ops = st_ops->verifier_ops; 19239 19240 return 0; 19241 } 19242 #define SECURITY_PREFIX "security_" 19243 19244 
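/* For example (target names chosen purely for illustration): a
 * BPF_MODIFY_RETURN attachment to security_file_open passes the check
 * below via the "security_" prefix match, while a plain kernel function
 * such as vfs_read passes this check only if its address is on the
 * error injection list (ALLOW_ERROR_INJECTION).
 */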
static int check_attach_modify_return(unsigned long addr, const char *func_name) 19245 { 19246 if (within_error_injection_list(addr) || 19247 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 19248 return 0; 19249 19250 return -EINVAL; 19251 } 19252 19253 /* list of non-sleepable functions that are otherwise on 19254 * ALLOW_ERROR_INJECTION list 19255 */ 19256 BTF_SET_START(btf_non_sleepable_error_inject) 19257 /* Three functions below can be called from sleepable and non-sleepable context. 19258 * Assume non-sleepable from bpf safety point of view. 19259 */ 19260 BTF_ID(func, __filemap_add_folio) 19261 BTF_ID(func, should_fail_alloc_page) 19262 BTF_ID(func, should_failslab) 19263 BTF_SET_END(btf_non_sleepable_error_inject) 19264 19265 static int check_non_sleepable_error_inject(u32 btf_id) 19266 { 19267 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 19268 } 19269 19270 int bpf_check_attach_target(struct bpf_verifier_log *log, 19271 const struct bpf_prog *prog, 19272 const struct bpf_prog *tgt_prog, 19273 u32 btf_id, 19274 struct bpf_attach_target_info *tgt_info) 19275 { 19276 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 19277 const char prefix[] = "btf_trace_"; 19278 int ret = 0, subprog = -1, i; 19279 const struct btf_type *t; 19280 bool conservative = true; 19281 const char *tname; 19282 struct btf *btf; 19283 long addr = 0; 19284 struct module *mod = NULL; 19285 19286 if (!btf_id) { 19287 bpf_log(log, "Tracing programs must provide btf_id\n"); 19288 return -EINVAL; 19289 } 19290 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 19291 if (!btf) { 19292 bpf_log(log, 19293 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 19294 return -EINVAL; 19295 } 19296 t = btf_type_by_id(btf, btf_id); 19297 if (!t) { 19298 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 19299 return -EINVAL; 19300 } 19301 tname = btf_name_by_offset(btf, t->name_off); 19302 if (!tname) { 19303 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 19304 return -EINVAL; 19305 } 19306 if (tgt_prog) { 19307 struct bpf_prog_aux *aux = tgt_prog->aux; 19308 19309 if (bpf_prog_is_dev_bound(prog->aux) && 19310 !bpf_prog_dev_bound_match(prog, tgt_prog)) { 19311 bpf_log(log, "Target program bound device mismatch"); 19312 return -EINVAL; 19313 } 19314 19315 for (i = 0; i < aux->func_info_cnt; i++) 19316 if (aux->func_info[i].type_id == btf_id) { 19317 subprog = i; 19318 break; 19319 } 19320 if (subprog == -1) { 19321 bpf_log(log, "Subprog %s doesn't exist\n", tname); 19322 return -EINVAL; 19323 } 19324 conservative = aux->func_info_aux[subprog].unreliable; 19325 if (prog_extension) { 19326 if (conservative) { 19327 bpf_log(log, 19328 "Cannot replace static functions\n"); 19329 return -EINVAL; 19330 } 19331 if (!prog->jit_requested) { 19332 bpf_log(log, 19333 "Extension programs should be JITed\n"); 19334 return -EINVAL; 19335 } 19336 } 19337 if (!tgt_prog->jited) { 19338 bpf_log(log, "Can attach to only JITed progs\n"); 19339 return -EINVAL; 19340 } 19341 if (tgt_prog->type == prog->type) { 19342 /* Cannot fentry/fexit another fentry/fexit program. 19343 * Cannot attach program extension to another extension. 19344 * It's ok to attach fentry/fexit to extension program. 
19345 */ 19346 bpf_log(log, "Cannot recursively attach\n"); 19347 return -EINVAL; 19348 } 19349 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 19350 prog_extension && 19351 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 19352 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 19353 /* Program extensions can extend all program types 19354 * except fentry/fexit. The reason is the following. 19355 * The fentry/fexit programs are used for performance 19356 * analysis, stats and can be attached to any program 19357 * type except themselves. When extension program is 19358 * replacing XDP function it is necessary to allow 19359 * performance analysis of all functions. Both original 19360 * XDP program and its program extension. Hence 19361 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 19362 * allowed. If extending of fentry/fexit was allowed it 19363 * would be possible to create long call chain 19364 * fentry->extension->fentry->extension beyond 19365 * reasonable stack size. Hence extending fentry is not 19366 * allowed. 19367 */ 19368 bpf_log(log, "Cannot extend fentry/fexit\n"); 19369 return -EINVAL; 19370 } 19371 } else { 19372 if (prog_extension) { 19373 bpf_log(log, "Cannot replace kernel functions\n"); 19374 return -EINVAL; 19375 } 19376 } 19377 19378 switch (prog->expected_attach_type) { 19379 case BPF_TRACE_RAW_TP: 19380 if (tgt_prog) { 19381 bpf_log(log, 19382 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 19383 return -EINVAL; 19384 } 19385 if (!btf_type_is_typedef(t)) { 19386 bpf_log(log, "attach_btf_id %u is not a typedef\n", 19387 btf_id); 19388 return -EINVAL; 19389 } 19390 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 19391 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", 19392 btf_id, tname); 19393 return -EINVAL; 19394 } 19395 tname += sizeof(prefix) - 1; 19396 t = btf_type_by_id(btf, t->type); 19397 if (!btf_type_is_ptr(t)) 19398 /* should never happen in valid vmlinux build */ 19399 return -EINVAL; 19400 t = btf_type_by_id(btf, t->type); 19401 if (!btf_type_is_func_proto(t)) 19402 /* should never happen in valid vmlinux build */ 19403 return -EINVAL; 19404 19405 break; 19406 case BPF_TRACE_ITER: 19407 if (!btf_type_is_func(t)) { 19408 bpf_log(log, "attach_btf_id %u is not a function\n", 19409 btf_id); 19410 return -EINVAL; 19411 } 19412 t = btf_type_by_id(btf, t->type); 19413 if (!btf_type_is_func_proto(t)) 19414 return -EINVAL; 19415 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 19416 if (ret) 19417 return ret; 19418 break; 19419 default: 19420 if (!prog_extension) 19421 return -EINVAL; 19422 fallthrough; 19423 case BPF_MODIFY_RETURN: 19424 case BPF_LSM_MAC: 19425 case BPF_LSM_CGROUP: 19426 case BPF_TRACE_FENTRY: 19427 case BPF_TRACE_FEXIT: 19428 if (!btf_type_is_func(t)) { 19429 bpf_log(log, "attach_btf_id %u is not a function\n", 19430 btf_id); 19431 return -EINVAL; 19432 } 19433 if (prog_extension && 19434 btf_check_type_match(log, prog, btf, t)) 19435 return -EINVAL; 19436 t = btf_type_by_id(btf, t->type); 19437 if (!btf_type_is_func_proto(t)) 19438 return -EINVAL; 19439 19440 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && 19441 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || 19442 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) 19443 return -EINVAL; 19444 19445 if (tgt_prog && conservative) 19446 t = NULL; 19447 19448 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 19449 if (ret < 0) 19450 return ret; 19451 
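/* Next, resolve the concrete address the trampoline will attach to:
 * for a target BPF program this is the JITed address of the main prog
 * or of the requested subprog, otherwise it is the kernel or module
 * symbol looked up by name.
 */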
19452 if (tgt_prog) { 19453 if (subprog == 0) 19454 addr = (long) tgt_prog->bpf_func; 19455 else 19456 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 19457 } else { 19458 if (btf_is_module(btf)) { 19459 mod = btf_try_get_module(btf); 19460 if (mod) 19461 addr = find_kallsyms_symbol_value(mod, tname); 19462 else 19463 addr = 0; 19464 } else { 19465 addr = kallsyms_lookup_name(tname); 19466 } 19467 if (!addr) { 19468 module_put(mod); 19469 bpf_log(log, 19470 "The address of function %s cannot be found\n", 19471 tname); 19472 return -ENOENT; 19473 } 19474 } 19475 19476 if (prog->aux->sleepable) { 19477 ret = -EINVAL; 19478 switch (prog->type) { 19479 case BPF_PROG_TYPE_TRACING: 19480 19481 /* fentry/fexit/fmod_ret progs can be sleepable if they are 19482 * attached to ALLOW_ERROR_INJECTION and are not in denylist. 19483 */ 19484 if (!check_non_sleepable_error_inject(btf_id) && 19485 within_error_injection_list(addr)) 19486 ret = 0; 19487 /* fentry/fexit/fmod_ret progs can also be sleepable if they are 19488 * in the fmodret id set with the KF_SLEEPABLE flag. 19489 */ 19490 else { 19491 u32 *flags = btf_kfunc_is_modify_return(btf, btf_id, 19492 prog); 19493 19494 if (flags && (*flags & KF_SLEEPABLE)) 19495 ret = 0; 19496 } 19497 break; 19498 case BPF_PROG_TYPE_LSM: 19499 /* LSM progs check that they are attached to bpf_lsm_*() funcs. 19500 * Only some of them are sleepable. 19501 */ 19502 if (bpf_lsm_is_sleepable_hook(btf_id)) 19503 ret = 0; 19504 break; 19505 default: 19506 break; 19507 } 19508 if (ret) { 19509 module_put(mod); 19510 bpf_log(log, "%s is not sleepable\n", tname); 19511 return ret; 19512 } 19513 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 19514 if (tgt_prog) { 19515 module_put(mod); 19516 bpf_log(log, "can't modify return codes of BPF programs\n"); 19517 return -EINVAL; 19518 } 19519 ret = -EINVAL; 19520 if (btf_kfunc_is_modify_return(btf, btf_id, prog) || 19521 !check_attach_modify_return(addr, tname)) 19522 ret = 0; 19523 if (ret) { 19524 module_put(mod); 19525 bpf_log(log, "%s() is not modifiable\n", tname); 19526 return ret; 19527 } 19528 } 19529 19530 break; 19531 } 19532 tgt_info->tgt_addr = addr; 19533 tgt_info->tgt_name = tname; 19534 tgt_info->tgt_type = t; 19535 tgt_info->tgt_mod = mod; 19536 return 0; 19537 } 19538 19539 BTF_SET_START(btf_id_deny) 19540 BTF_ID_UNUSED 19541 #ifdef CONFIG_SMP 19542 BTF_ID(func, migrate_disable) 19543 BTF_ID(func, migrate_enable) 19544 #endif 19545 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU 19546 BTF_ID(func, rcu_read_unlock_strict) 19547 #endif 19548 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) 19549 BTF_ID(func, preempt_count_add) 19550 BTF_ID(func, preempt_count_sub) 19551 #endif 19552 #ifdef CONFIG_PREEMPT_RCU 19553 BTF_ID(func, __rcu_read_lock) 19554 BTF_ID(func, __rcu_read_unlock) 19555 #endif 19556 BTF_SET_END(btf_id_deny) 19557 19558 static bool can_be_sleepable(struct bpf_prog *prog) 19559 { 19560 if (prog->type == BPF_PROG_TYPE_TRACING) { 19561 switch (prog->expected_attach_type) { 19562 case BPF_TRACE_FENTRY: 19563 case BPF_TRACE_FEXIT: 19564 case BPF_MODIFY_RETURN: 19565 case BPF_TRACE_ITER: 19566 return true; 19567 default: 19568 return false; 19569 } 19570 } 19571 return prog->type == BPF_PROG_TYPE_LSM || 19572 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || 19573 prog->type == BPF_PROG_TYPE_STRUCT_OPS; 19574 } 19575 19576 static int check_attach_btf_id(struct bpf_verifier_env *env) 19577 { 19578 struct bpf_prog *prog = env->prog; 19579 
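	/* Editorial note: dst_prog below is the target BPF program that was
	 * selected via attach_prog_fd at load time; it stays NULL when the
	 * program attaches to a kernel function or tracepoint instead.
	 */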
struct bpf_prog *tgt_prog = prog->aux->dst_prog; 19580 struct bpf_attach_target_info tgt_info = {}; 19581 u32 btf_id = prog->aux->attach_btf_id; 19582 struct bpf_trampoline *tr; 19583 int ret; 19584 u64 key; 19585 19586 if (prog->type == BPF_PROG_TYPE_SYSCALL) { 19587 if (prog->aux->sleepable) 19588 /* attach_btf_id checked to be zero already */ 19589 return 0; 19590 verbose(env, "Syscall programs can only be sleepable\n"); 19591 return -EINVAL; 19592 } 19593 19594 if (prog->aux->sleepable && !can_be_sleepable(prog)) { 19595 verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); 19596 return -EINVAL; 19597 } 19598 19599 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 19600 return check_struct_ops_btf_id(env); 19601 19602 if (prog->type != BPF_PROG_TYPE_TRACING && 19603 prog->type != BPF_PROG_TYPE_LSM && 19604 prog->type != BPF_PROG_TYPE_EXT) 19605 return 0; 19606 19607 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); 19608 if (ret) 19609 return ret; 19610 19611 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { 19612 /* to make freplace equivalent to their targets, they need to 19613 * inherit env->ops and expected_attach_type for the rest of the 19614 * verification 19615 */ 19616 env->ops = bpf_verifier_ops[tgt_prog->type]; 19617 prog->expected_attach_type = tgt_prog->expected_attach_type; 19618 } 19619 19620 /* store info about the attachment target that will be used later */ 19621 prog->aux->attach_func_proto = tgt_info.tgt_type; 19622 prog->aux->attach_func_name = tgt_info.tgt_name; 19623 prog->aux->mod = tgt_info.tgt_mod; 19624 19625 if (tgt_prog) { 19626 prog->aux->saved_dst_prog_type = tgt_prog->type; 19627 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; 19628 } 19629 19630 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { 19631 prog->aux->attach_btf_trace = true; 19632 return 0; 19633 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { 19634 if (!bpf_iter_prog_supported(prog)) 19635 return -EINVAL; 19636 return 0; 19637 } 19638 19639 if (prog->type == BPF_PROG_TYPE_LSM) { 19640 ret = bpf_lsm_verify_prog(&env->log, prog); 19641 if (ret < 0) 19642 return ret; 19643 } else if (prog->type == BPF_PROG_TYPE_TRACING && 19644 btf_id_set_contains(&btf_id_deny, btf_id)) { 19645 return -EINVAL; 19646 } 19647 19648 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); 19649 tr = bpf_trampoline_get(key, &tgt_info); 19650 if (!tr) 19651 return -ENOMEM; 19652 19653 if (tgt_prog && tgt_prog->aux->tail_call_reachable) 19654 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; 19655 19656 prog->aux->dst_trampoline = tr; 19657 return 0; 19658 } 19659 19660 struct btf *bpf_get_btf_vmlinux(void) 19661 { 19662 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 19663 mutex_lock(&bpf_verifier_lock); 19664 if (!btf_vmlinux) 19665 btf_vmlinux = btf_parse_vmlinux(); 19666 mutex_unlock(&bpf_verifier_lock); 19667 } 19668 return btf_vmlinux; 19669 } 19670 19671 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 19672 { 19673 u64 start_time = ktime_get_ns(); 19674 struct bpf_verifier_env *env; 19675 int i, len, ret = -EINVAL, err; 19676 u32 log_true_size; 19677 bool is_priv; 19678 19679 /* no program is valid */ 19680 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 19681 return -EINVAL; 19682 19683 /* 'struct bpf_verifier_env' can be global, but since it's not small, 19684 * allocate/free it every time bpf_check() is called 19685 */ 19686 env = 
kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 19687 if (!env) 19688 return -ENOMEM; 19689 19690 env->bt.env = env; 19691 19692 len = (*prog)->len; 19693 env->insn_aux_data = 19694 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 19695 ret = -ENOMEM; 19696 if (!env->insn_aux_data) 19697 goto err_free_env; 19698 for (i = 0; i < len; i++) 19699 env->insn_aux_data[i].orig_idx = i; 19700 env->prog = *prog; 19701 env->ops = bpf_verifier_ops[env->prog->type]; 19702 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); 19703 is_priv = bpf_capable(); 19704 19705 bpf_get_btf_vmlinux(); 19706 19707 /* grab the mutex to protect few globals used by verifier */ 19708 if (!is_priv) 19709 mutex_lock(&bpf_verifier_lock); 19710 19711 /* user could have requested verbose verifier output 19712 * and supplied buffer to store the verification trace 19713 */ 19714 ret = bpf_vlog_init(&env->log, attr->log_level, 19715 (char __user *) (unsigned long) attr->log_buf, 19716 attr->log_size); 19717 if (ret) 19718 goto err_unlock; 19719 19720 mark_verifier_state_clean(env); 19721 19722 if (IS_ERR(btf_vmlinux)) { 19723 /* Either gcc or pahole or kernel are broken. */ 19724 verbose(env, "in-kernel BTF is malformed\n"); 19725 ret = PTR_ERR(btf_vmlinux); 19726 goto skip_full_check; 19727 } 19728 19729 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 19730 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 19731 env->strict_alignment = true; 19732 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 19733 env->strict_alignment = false; 19734 19735 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); 19736 env->allow_uninit_stack = bpf_allow_uninit_stack(); 19737 env->bypass_spec_v1 = bpf_bypass_spec_v1(); 19738 env->bypass_spec_v4 = bpf_bypass_spec_v4(); 19739 env->bpf_capable = bpf_capable(); 19740 19741 if (is_priv) 19742 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 19743 19744 env->explored_states = kvcalloc(state_htab_size(env), 19745 sizeof(struct bpf_verifier_state_list *), 19746 GFP_USER); 19747 ret = -ENOMEM; 19748 if (!env->explored_states) 19749 goto skip_full_check; 19750 19751 ret = add_subprog_and_kfunc(env); 19752 if (ret < 0) 19753 goto skip_full_check; 19754 19755 ret = check_subprogs(env); 19756 if (ret < 0) 19757 goto skip_full_check; 19758 19759 ret = check_btf_info(env, attr, uattr); 19760 if (ret < 0) 19761 goto skip_full_check; 19762 19763 ret = check_attach_btf_id(env); 19764 if (ret) 19765 goto skip_full_check; 19766 19767 ret = resolve_pseudo_ldimm64(env); 19768 if (ret < 0) 19769 goto skip_full_check; 19770 19771 if (bpf_prog_is_offloaded(env->prog->aux)) { 19772 ret = bpf_prog_offload_verifier_prep(env->prog); 19773 if (ret) 19774 goto skip_full_check; 19775 } 19776 19777 ret = check_cfg(env); 19778 if (ret < 0) 19779 goto skip_full_check; 19780 19781 ret = do_check_subprogs(env); 19782 ret = ret ?: do_check_main(env); 19783 19784 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) 19785 ret = bpf_prog_offload_finalize(env); 19786 19787 skip_full_check: 19788 kvfree(env->explored_states); 19789 19790 if (ret == 0) 19791 ret = check_max_stack_depth(env); 19792 19793 /* instruction rewrites happen after this point */ 19794 if (ret == 0) 19795 ret = optimize_bpf_loop(env); 19796 19797 if (is_priv) { 19798 if (ret == 0) 19799 opt_hard_wire_dead_code_branches(env); 19800 if (ret == 0) 19801 ret = opt_remove_dead_code(env); 19802 if (ret == 0) 19803 ret = opt_remove_nops(env); 19804 } else { 19805 if (ret == 0) 19806 sanitize_dead_code(env); 
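		/* Editorial note: on this unprivileged path dead instructions
		 * are only neutralized by sanitize_dead_code() above; outright
		 * removal via opt_remove_dead_code() is reserved for the
		 * privileged branch.
		 */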
19807 } 19808 19809 if (ret == 0) 19810 /* program is valid, convert *(u32*)(ctx + off) accesses */ 19811 ret = convert_ctx_accesses(env); 19812 19813 if (ret == 0) 19814 ret = do_misc_fixups(env); 19815 19816 /* do 32-bit optimization after insn patching has done so those patched 19817 * insns could be handled correctly. 19818 */ 19819 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { 19820 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 19821 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret 19822 : false; 19823 } 19824 19825 if (ret == 0) 19826 ret = fixup_call_args(env); 19827 19828 env->verification_time = ktime_get_ns() - start_time; 19829 print_verification_stats(env); 19830 env->prog->aux->verified_insns = env->insn_processed; 19831 19832 /* preserve original error even if log finalization is successful */ 19833 err = bpf_vlog_finalize(&env->log, &log_true_size); 19834 if (err) 19835 ret = err; 19836 19837 if (uattr_size >= offsetofend(union bpf_attr, log_true_size) && 19838 copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size), 19839 &log_true_size, sizeof(log_true_size))) { 19840 ret = -EFAULT; 19841 goto err_release_maps; 19842 } 19843 19844 if (ret) 19845 goto err_release_maps; 19846 19847 if (env->used_map_cnt) { 19848 /* if program passed verifier, update used_maps in bpf_prog_info */ 19849 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 19850 sizeof(env->used_maps[0]), 19851 GFP_KERNEL); 19852 19853 if (!env->prog->aux->used_maps) { 19854 ret = -ENOMEM; 19855 goto err_release_maps; 19856 } 19857 19858 memcpy(env->prog->aux->used_maps, env->used_maps, 19859 sizeof(env->used_maps[0]) * env->used_map_cnt); 19860 env->prog->aux->used_map_cnt = env->used_map_cnt; 19861 } 19862 if (env->used_btf_cnt) { 19863 /* if program passed verifier, update used_btfs in bpf_prog_aux */ 19864 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, 19865 sizeof(env->used_btfs[0]), 19866 GFP_KERNEL); 19867 if (!env->prog->aux->used_btfs) { 19868 ret = -ENOMEM; 19869 goto err_release_maps; 19870 } 19871 19872 memcpy(env->prog->aux->used_btfs, env->used_btfs, 19873 sizeof(env->used_btfs[0]) * env->used_btf_cnt); 19874 env->prog->aux->used_btf_cnt = env->used_btf_cnt; 19875 } 19876 if (env->used_map_cnt || env->used_btf_cnt) { 19877 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 19878 * bpf_ld_imm64 instructions 19879 */ 19880 convert_pseudo_ld_imm64(env); 19881 } 19882 19883 adjust_btf_func(env); 19884 19885 err_release_maps: 19886 if (!env->prog->aux->used_maps) 19887 /* if we didn't copy map pointers into bpf_prog_info, release 19888 * them now. Otherwise free_used_maps() will release them. 19889 */ 19890 release_maps(env); 19891 if (!env->prog->aux->used_btfs) 19892 release_btfs(env); 19893 19894 /* extension progs temporarily inherit the attach_type of their targets 19895 for verification purposes, so set it back to zero before returning 19896 */ 19897 if (env->prog->type == BPF_PROG_TYPE_EXT) 19898 env->prog->expected_attach_type = 0; 19899 19900 *prog = env->prog; 19901 err_unlock: 19902 if (!is_priv) 19903 mutex_unlock(&bpf_verifier_lock); 19904 vfree(env->insn_aux_data); 19905 err_free_env: 19906 kfree(env); 19907 return ret; 19908 } 19909