// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
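
/* A BPF_CALL instruction can encode three kinds of calls, distinguished by
 * its src_reg field: 0 is a call to a helper function, BPF_PSEUDO_CALL is a
 * bpf-to-bpf call into another subprog of the same program, and
 * BPF_PSEUDO_KFUNC_CALL is a call into a kernel function described by BTF.
 * The two predicates below recognize the latter two forms.
 */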

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	u8 release_regno;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
	struct bpf_map_value_off_desc *kptr_off_desc;
	u8 uninit_dynptr_regno;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_MAP_VALUE ||
	       type == PTR_TO_MAP_KEY ||
	       type == PTR_TO_SOCK_COMMON;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
	       map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_SOCKET || type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_MEM || type == PTR_TO_BTF_ID;
}

static bool type_is_rdonly_mem(u32 type)
{
	return type & MEM_RDONLY;
}

static bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}
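
/* A bpf_reg_type value is a base type possibly combined with modifier flags
 * such as PTR_MAYBE_NULL, MEM_RDONLY, MEM_ALLOC, MEM_USER, MEM_PERCPU or
 * PTR_UNTRUSTED; base_type() strips the flags, and the small predicates
 * above test either individual flag bits or groups of base types.
 */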

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve ||
	    func_id == BPF_FUNC_kptr_xchg)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
	       func_id == BPF_FUNC_sk_fullsock ||
	       func_id == BPF_FUNC_skc_to_tcp_sock ||
	       func_id == BPF_FUNC_skc_to_tcp6_sock ||
	       func_id == BPF_FUNC_skc_to_udp6_sock ||
	       func_id == BPF_FUNC_skc_to_mptcp_sock ||
	       func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
	       func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_dynptr_ref_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_dynptr_data;
}

static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
					const struct bpf_map *map)
{
	int ref_obj_uses = 0;

	if (is_ptr_cast_function(func_id))
		ref_obj_uses++;
	if (is_acquire_function(func_id, map))
		ref_obj_uses++;
	if (is_dynptr_ref_function(func_id))
		ref_obj_uses++;

	return ref_obj_uses > 1;
}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}

/* string representation of 'enum bpf_reg_type'
 *
 * Note that reg_type_str() can not appear more than once in a single verbose()
 * statement.
 */
static const char *reg_type_str(struct bpf_verifier_env *env,
				enum bpf_reg_type type)
{
	char postfix[16] = {0}, prefix[32] = {0};
	static const char * const str[] = {
		[NOT_INIT]		= "?",
		[SCALAR_VALUE]		= "scalar",
		[PTR_TO_CTX]		= "ctx",
		[CONST_PTR_TO_MAP]	= "map_ptr",
		[PTR_TO_MAP_VALUE]	= "map_value",
		[PTR_TO_STACK]		= "fp",
		[PTR_TO_PACKET]		= "pkt",
		[PTR_TO_PACKET_META]	= "pkt_meta",
		[PTR_TO_PACKET_END]	= "pkt_end",
		[PTR_TO_FLOW_KEYS]	= "flow_keys",
		[PTR_TO_SOCKET]		= "sock",
		[PTR_TO_SOCK_COMMON]	= "sock_common",
		[PTR_TO_TCP_SOCK]	= "tcp_sock",
		[PTR_TO_TP_BUFFER]	= "tp_buffer",
		[PTR_TO_XDP_SOCK]	= "xdp_sock",
		[PTR_TO_BTF_ID]		= "ptr_",
		[PTR_TO_MEM]		= "mem",
		[PTR_TO_BUF]		= "buf",
		[PTR_TO_FUNC]		= "func",
		[PTR_TO_MAP_KEY]	= "map_key",
		[PTR_TO_DYNPTR]		= "dynptr_ptr",
	};

	if (type & PTR_MAYBE_NULL) {
		if (base_type(type) == PTR_TO_BTF_ID)
			strncpy(postfix, "or_null_", 16);
		else
			strncpy(postfix, "_or_null", 16);
	}

	if (type & MEM_RDONLY)
		strncpy(prefix, "rdonly_", 32);
	if (type & MEM_ALLOC)
		strncpy(prefix, "alloc_", 32);
	if (type & MEM_USER)
		strncpy(prefix, "user_", 32);
	if (type & MEM_PERCPU)
		strncpy(prefix, "percpu_", 32);
	if (type & PTR_UNTRUSTED)
		strncpy(prefix, "untrusted_", 32);

	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
		 prefix, str[base_type(type)], postfix);
	return env->type_str_buf;
}

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
};
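
/* One-character legend used by print_verifier_state() when dumping stack
 * slot contents. Likewise, print_liveness() below prints a leading '_'
 * followed by 'r', 'w' and/or 'D' for registers and slots marked
 * REG_LIVE_READ, REG_LIVE_WRITTEN or REG_LIVE_DONE.
 */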

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static int get_spi(s32 off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;

	/* We need to check that slots between [spi - nr_slots + 1, spi] are
	 * within [0, allocated_stack).
	 *
	 * Please note that the spi grows downwards. For example, a dynptr
	 * takes the size of two stack slots; the first slot will be at
	 * spi and the second slot will be at spi - 1.
	 */
	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static const char *kernel_type_name(const struct btf* btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}
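
/* scratched_regs is a bitmask indexed by register number and
 * scratched_stack_slots a 64-bit bitmask indexed by stack slot index; they
 * record which registers and slots have been touched since the state was
 * last printed, so that print_verifier_state(env, state, false) can show
 * only what changed. get_spi() above maps a negative stack offset to a
 * slot index, e.g. with BPF_REG_SIZE == 8, off == -8 gives spi 0 and
 * off == -16 gives spi 1.
 */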

/* Used for printing the entire verifier state. */
static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
	case DYNPTR_TYPE_LOCAL:
		return BPF_DYNPTR_TYPE_LOCAL;
	case DYNPTR_TYPE_RINGBUF:
		return BPF_DYNPTR_TYPE_RINGBUF;
	default:
		return BPF_DYNPTR_TYPE_INVALID;
	}
}

static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{
	return type == BPF_DYNPTR_TYPE_RINGBUF;
}

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   enum bpf_arg_type arg_type, int insn_idx)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type type;
	int spi, i, id;

	spi = get_spi(reg->off);

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
		return -EINVAL;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_DYNPTR;
		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
	}

	type = arg_to_dynptr_type(arg_type);
	if (type == BPF_DYNPTR_TYPE_INVALID)
		return -EINVAL;

	state->stack[spi].spilled_ptr.dynptr.first_slot = true;
	state->stack[spi].spilled_ptr.dynptr.type = type;
	state->stack[spi - 1].spilled_ptr.dynptr.type = type;

	if (dynptr_type_refcounted(type)) {
		/* The id is used to track proper releasing */
		id = acquire_reference_state(env, insn_idx);
		if (id < 0)
			return id;

		state->stack[spi].spilled_ptr.id = id;
		state->stack[spi - 1].spilled_ptr.id = id;
	}

	return 0;
}

static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i;

	spi = get_spi(reg->off);

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
		return -EINVAL;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	/* Invalidate any slices associated with this dynptr */
	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		release_reference(env, state->stack[spi].spilled_ptr.id);
		state->stack[spi].spilled_ptr.id = 0;
		state->stack[spi - 1].spilled_ptr.id = 0;
	}

	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
	state->stack[spi].spilled_ptr.dynptr.type = 0;
	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;

	return 0;
}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi = get_spi(reg->off);
	int i;

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
		return true;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
			return false;
	}

	return true;
}

bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi = get_spi(reg->off);
	int i;

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
		return false;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
			return false;
	}

	return true;
}

bool is_dynptr_type_expected(struct bpf_verifier_env *env,
			     struct bpf_reg_state *reg,
			     enum bpf_arg_type arg_type)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type dynptr_type;
	int spi = get_spi(reg->off);

	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
	if (arg_type == ARG_PTR_TO_DYNPTR)
		return true;

	dynptr_type = arg_to_dynptr_type(arg_type);

	return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
}

/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static void scrub_spilled_slot(u8 *stype)
{
	if (*stype != STACK_INVALID)
		*stype = STACK_MISC;
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state,
				 bool print_all)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			const char *sep = "";

			verbose(env, "%s", reg_type_str(env, t));
			if (base_type(t) == PTR_TO_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
			verbose(env, "(");
/*
 * _a stands for append, was shortened to avoid multiline statements below.
 * This macro is used to output a comma separated list of attributes.
 */
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })

			if (reg->id)
				verbose_a("id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose_a("off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose_a("r=%d", reg->range);
			else if (base_type(t) == CONST_PTR_TO_MAP ||
				 base_type(t) == PTR_TO_MAP_KEY ||
				 base_type(t) == PTR_TO_MAP_VALUE)
				verbose_a("ks=%d,vs=%d",
					  reg->map_ptr->key_size,
					  reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose_a("imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose_a("smin=%lld", (long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose_a("smax=%lld", (long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose_a("var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
			}
#undef verbose_a

			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		if (!print_all && !stack_slot_scratched(env, i))
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (is_spilled_reg(&state->stack[i])) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
"" : reg_type_str(env, t)); 953 if (t == SCALAR_VALUE && reg->precise) 954 verbose(env, "P"); 955 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) 956 verbose(env, "%lld", reg->var_off.value + reg->off); 957 } else { 958 verbose(env, "=%s", types_buf); 959 } 960 } 961 if (state->acquired_refs && state->refs[0].id) { 962 verbose(env, " refs=%d", state->refs[0].id); 963 for (i = 1; i < state->acquired_refs; i++) 964 if (state->refs[i].id) 965 verbose(env, ",%d", state->refs[i].id); 966 } 967 if (state->in_callback_fn) 968 verbose(env, " cb"); 969 if (state->in_async_callback_fn) 970 verbose(env, " async_cb"); 971 verbose(env, "\n"); 972 mark_verifier_state_clean(env); 973 } 974 975 static inline u32 vlog_alignment(u32 pos) 976 { 977 return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT), 978 BPF_LOG_MIN_ALIGNMENT) - pos - 1; 979 } 980 981 static void print_insn_state(struct bpf_verifier_env *env, 982 const struct bpf_func_state *state) 983 { 984 if (env->prev_log_len && env->prev_log_len == env->log.len_used) { 985 /* remove new line character */ 986 bpf_vlog_reset(&env->log, env->prev_log_len - 1); 987 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' '); 988 } else { 989 verbose(env, "%d:", env->insn_idx); 990 } 991 print_verifier_state(env, state, false); 992 } 993 994 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too 995 * small to hold src. This is different from krealloc since we don't want to preserve 996 * the contents of dst. 997 * 998 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could 999 * not be allocated. 1000 */ 1001 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags) 1002 { 1003 size_t bytes; 1004 1005 if (ZERO_OR_NULL_PTR(src)) 1006 goto out; 1007 1008 if (unlikely(check_mul_overflow(n, size, &bytes))) 1009 return NULL; 1010 1011 if (ksize(dst) < bytes) { 1012 kfree(dst); 1013 dst = kmalloc_track_caller(bytes, flags); 1014 if (!dst) 1015 return NULL; 1016 } 1017 1018 memcpy(dst, src, bytes); 1019 out: 1020 return dst ? dst : ZERO_SIZE_PTR; 1021 } 1022 1023 /* resize an array from old_n items to new_n items. the array is reallocated if it's too 1024 * small to hold new_n items. new items are zeroed out if the array grows. 1025 * 1026 * Contrary to krealloc_array, does not free arr if new_n is zero. 1027 */ 1028 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size) 1029 { 1030 void *new_arr; 1031 1032 if (!new_n || old_n == new_n) 1033 goto out; 1034 1035 new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL); 1036 if (!new_arr) { 1037 kfree(arr); 1038 return NULL; 1039 } 1040 arr = new_arr; 1041 1042 if (new_n > old_n) 1043 memset(arr + old_n * size, 0, (new_n - old_n) * size); 1044 1045 out: 1046 return arr ? 

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->acquired_refs = src->acquired_refs;
	return 0;
}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}

static int grow_stack_state(struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;
	return 0;
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;
	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;

	return id;
}
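
/* callback_ref records the frame number of the callback that acquired the
 * reference (or 0 when acquired outside a callback), so that
 * release_reference_state() below can refuse to release references owned by
 * the caller of a callback.
 */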

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			/* Cannot release caller references in callbacks */
			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
				return -EINVAL;
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
					    GFP_USER);
	if (!dst_state->jmp_history)
		return -ENOMEM;
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}
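
/* push_stack() and pop_stack() below maintain the depth-first work list of
 * states still to be explored. Each stack element remembers log_pos, the
 * verifier log length at push time, so that pop_stack() can rewind the log
 * to that point when asked to drop the output of an abandoned path.
 */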

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}
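
/* ___mark_reg_known() above collapses all tracked ranges to one constant:
 * e.g. ___mark_reg_known(reg, 5) sets var_off to the constant tnum
 * {value = 5, mask = 0} and makes every signed/unsigned 64-bit and 32-bit
 * min/max pair equal to 5.
 */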

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	___mark_reg_known(reg, imm);
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{
	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
		const struct bpf_map *map = reg->map_ptr;

		if (map->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = map->inner_map_meta;
			/* transfer reg's id which is unique for every map_lookup_elem
			 * as UID of the inner map.
			 */
			if (map_value_has_timer(map->inner_map_meta))
				reg->map_uid = reg->id;
		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
			reg->type = PTR_TO_XDP_SOCK;
		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
			reg->type = PTR_TO_SOCKET;
		} else {
			reg->type = PTR_TO_MAP_VALUE;
		}
		return;
	}

	reg->type &= ~PTR_MAYBE_NULL;
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
	struct tnum var32_off = tnum_subreg(reg->var_off);

	/* min signed is max(sign bit) | min(other bits) */
	reg->s32_min_value = max_t(s32, reg->s32_min_value,
				   var32_off.value | (var32_off.mask & S32_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->s32_max_value = min_t(s32, reg->s32_max_value,
				   var32_off.value | (var32_off.mask & S32_MAX));
	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
	reg->u32_max_value = min(reg->u32_max_value,
				 (u32)(var32_off.value | var32_off.mask));
}

static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	__update_reg32_bounds(reg);
	__update_reg64_bounds(reg);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s32)reg->u32_max_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value;
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
	} else if ((s32)reg->u32_min_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value;
	}
}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

static void reg_bounds_sync(struct bpf_reg_state *reg)
{
	/* We might have learned new bounds from the var_off. */
	__update_reg_bounds(reg);
	/* We might have learned something about the sign bit. */
	__reg_deduce_bounds(reg);
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(reg);
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__update_reg_bounds(reg);
}

static bool __reg32_bound_s64(s32 a)
{
	return a >= 0 && a <= S32_MAX;
}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;

	/* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
	 * be positive otherwise set to worse case bounds and refine later
	 * from tnum.
	 */
	if (__reg32_bound_s64(reg->s32_min_value) &&
	    __reg32_bound_s64(reg->s32_max_value)) {
		reg->smin_value = reg->s32_min_value;
		reg->smax_value = reg->s32_max_value;
	} else {
		reg->smin_value = 0;
		reg->smax_value = U32_MAX;
	}
}
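
/* After a 32-bit (jmp32) comparison only the subregister bounds are
 * trustworthy. __reg_combine_32_into_64() below either adopts them directly
 * when the upper 32 bits are known zero, or resets the 64-bit bounds and
 * lets reg_bounds_sync() re-derive them from var_off.
 */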

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence
	 * allowing us to use 32-bit bounds directly,
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
		 */
		__mark_reg64_unbounded(reg);
	}
	reg_bounds_sync(reg);
}

static bool __reg64_bound_s32(s64 a)
{
	return a >= S32_MIN && a <= S32_MAX;
}

static bool __reg64_bound_u32(u64 a)
{
	return a >= U32_MIN && a <= U32_MAX;
}

static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
{
	__mark_reg32_unbounded(reg);
	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
		reg->s32_min_value = (s32)reg->smin_value;
		reg->s32_max_value = (s32)reg->smax_value;
	}
	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
		reg->u32_min_value = (u32)reg->umin_value;
		reg->u32_max_value = (u32)reg->umax_value;
	}
	reg_bounds_sync(reg);
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

static void mark_btf_ld_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *regs, u32 regno,
			    enum bpf_reg_type reg_type,
			    struct btf *btf, u32 btf_id,
			    enum bpf_type_flag flag)
{
	if (reg_type == SCALAR_VALUE) {
		mark_reg_unknown(env, regs, regno);
		return;
	}
	mark_reg_known_zero(env, regs, regno);
	regs[regno].type = PTR_TO_BTF_ID | flag;
	regs[regno].btf = btf;
	regs[regno].btf_id = btf_id;
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	state->callback_ret_range = tnum_range(0, 0);
	init_reg_state(env, state);
	mark_verifier_state_scratched(env);
}

/* Similar to push_stack(), but for async callbacks */
static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
						int insn_idx, int prev_insn_idx,
						int subprog)
{
	struct bpf_verifier_stack_elem *elem;
	struct bpf_func_state *frame;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
elem->log_pos = env->log.len_used; 1794 env->head = elem; 1795 env->stack_size++; 1796 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 1797 verbose(env, 1798 "The sequence of %d jumps is too complex for async cb.\n", 1799 env->stack_size); 1800 goto err; 1801 } 1802 /* Unlike push_stack() do not copy_verifier_state(). 1803 * The caller state doesn't matter. 1804 * This is async callback. It starts in a fresh stack. 1805 * Initialize it similar to do_check_common(). 1806 */ 1807 elem->st.branches = 1; 1808 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 1809 if (!frame) 1810 goto err; 1811 init_func_state(env, frame, 1812 BPF_MAIN_FUNC /* callsite */, 1813 0 /* frameno within this callchain */, 1814 subprog /* subprog number within this prog */); 1815 elem->st.frame[0] = frame; 1816 return &elem->st; 1817 err: 1818 free_verifier_state(env->cur_state, true); 1819 env->cur_state = NULL; 1820 /* pop all elements and return */ 1821 while (!pop_stack(env, NULL, NULL, false)); 1822 return NULL; 1823 } 1824 1825 1826 enum reg_arg_type { 1827 SRC_OP, /* register is used as source operand */ 1828 DST_OP, /* register is used as destination operand */ 1829 DST_OP_NO_MARK /* same as above, check only, don't mark */ 1830 }; 1831 1832 static int cmp_subprogs(const void *a, const void *b) 1833 { 1834 return ((struct bpf_subprog_info *)a)->start - 1835 ((struct bpf_subprog_info *)b)->start; 1836 } 1837 1838 static int find_subprog(struct bpf_verifier_env *env, int off) 1839 { 1840 struct bpf_subprog_info *p; 1841 1842 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 1843 sizeof(env->subprog_info[0]), cmp_subprogs); 1844 if (!p) 1845 return -ENOENT; 1846 return p - env->subprog_info; 1847 1848 } 1849 1850 static int add_subprog(struct bpf_verifier_env *env, int off) 1851 { 1852 int insn_cnt = env->prog->len; 1853 int ret; 1854 1855 if (off >= insn_cnt || off < 0) { 1856 verbose(env, "call to invalid destination\n"); 1857 return -EINVAL; 1858 } 1859 ret = find_subprog(env, off); 1860 if (ret >= 0) 1861 return ret; 1862 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 1863 verbose(env, "too many subprograms\n"); 1864 return -E2BIG; 1865 } 1866 /* determine subprog starts. 
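 * Starts are kept sorted by insn index so find_subprog() can bsearch them.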
The end is one before the next starts */ 1867 env->subprog_info[env->subprog_cnt++].start = off; 1868 sort(env->subprog_info, env->subprog_cnt, 1869 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 1870 return env->subprog_cnt - 1; 1871 } 1872 1873 #define MAX_KFUNC_DESCS 256 1874 #define MAX_KFUNC_BTFS 256 1875 1876 struct bpf_kfunc_desc { 1877 struct btf_func_model func_model; 1878 u32 func_id; 1879 s32 imm; 1880 u16 offset; 1881 }; 1882 1883 struct bpf_kfunc_btf { 1884 struct btf *btf; 1885 struct module *module; 1886 u16 offset; 1887 }; 1888 1889 struct bpf_kfunc_desc_tab { 1890 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; 1891 u32 nr_descs; 1892 }; 1893 1894 struct bpf_kfunc_btf_tab { 1895 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; 1896 u32 nr_descs; 1897 }; 1898 1899 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) 1900 { 1901 const struct bpf_kfunc_desc *d0 = a; 1902 const struct bpf_kfunc_desc *d1 = b; 1903 1904 /* func_id is not greater than BTF_MAX_TYPE */ 1905 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; 1906 } 1907 1908 static int kfunc_btf_cmp_by_off(const void *a, const void *b) 1909 { 1910 const struct bpf_kfunc_btf *d0 = a; 1911 const struct bpf_kfunc_btf *d1 = b; 1912 1913 return d0->offset - d1->offset; 1914 } 1915 1916 static const struct bpf_kfunc_desc * 1917 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) 1918 { 1919 struct bpf_kfunc_desc desc = { 1920 .func_id = func_id, 1921 .offset = offset, 1922 }; 1923 struct bpf_kfunc_desc_tab *tab; 1924 1925 tab = prog->aux->kfunc_tab; 1926 return bsearch(&desc, tab->descs, tab->nr_descs, 1927 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); 1928 } 1929 1930 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, 1931 s16 offset) 1932 { 1933 struct bpf_kfunc_btf kf_btf = { .offset = offset }; 1934 struct bpf_kfunc_btf_tab *tab; 1935 struct bpf_kfunc_btf *b; 1936 struct module *mod; 1937 struct btf *btf; 1938 int btf_fd; 1939 1940 tab = env->prog->aux->kfunc_btf_tab; 1941 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, 1942 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); 1943 if (!b) { 1944 if (tab->nr_descs == MAX_KFUNC_BTFS) { 1945 verbose(env, "too many different module BTFs\n"); 1946 return ERR_PTR(-E2BIG); 1947 } 1948 1949 if (bpfptr_is_null(env->fd_array)) { 1950 verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); 1951 return ERR_PTR(-EPROTO); 1952 } 1953 1954 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, 1955 offset * sizeof(btf_fd), 1956 sizeof(btf_fd))) 1957 return ERR_PTR(-EFAULT); 1958 1959 btf = btf_get_by_fd(btf_fd); 1960 if (IS_ERR(btf)) { 1961 verbose(env, "invalid module BTF fd specified\n"); 1962 return btf; 1963 } 1964 1965 if (!btf_is_module(btf)) { 1966 verbose(env, "BTF fd for kfunc is not a module BTF\n"); 1967 btf_put(btf); 1968 return ERR_PTR(-EINVAL); 1969 } 1970 1971 mod = btf_try_get_module(btf); 1972 if (!mod) { 1973 btf_put(btf); 1974 return ERR_PTR(-ENXIO); 1975 } 1976 1977 b = &tab->descs[tab->nr_descs++]; 1978 b->btf = btf; 1979 b->module = mod; 1980 b->offset = offset; 1981 1982 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 1983 kfunc_btf_cmp_by_off, NULL); 1984 } 1985 return b->btf; 1986 } 1987 1988 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) 1989 { 1990 if (!tab) 1991 return; 1992 1993 while (tab->nr_descs--) { 1994 module_put(tab->descs[tab->nr_descs].module); 1995 btf_put(tab->descs[tab->nr_descs].btf); 1996 } 1997 kfree(tab); 1998 } 1999 2000 static struct btf 
*find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) 2001 { 2002 if (offset) { 2003 if (offset < 0) { 2004 /* In the future, this can be allowed to increase limit 2005 * of fd index into fd_array, interpreted as u16. 2006 */ 2007 verbose(env, "negative offset disallowed for kernel module function call\n"); 2008 return ERR_PTR(-EINVAL); 2009 } 2010 2011 return __find_kfunc_desc_btf(env, offset); 2012 } 2013 return btf_vmlinux ?: ERR_PTR(-ENOENT); 2014 } 2015 2016 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) 2017 { 2018 const struct btf_type *func, *func_proto; 2019 struct bpf_kfunc_btf_tab *btf_tab; 2020 struct bpf_kfunc_desc_tab *tab; 2021 struct bpf_prog_aux *prog_aux; 2022 struct bpf_kfunc_desc *desc; 2023 const char *func_name; 2024 struct btf *desc_btf; 2025 unsigned long call_imm; 2026 unsigned long addr; 2027 int err; 2028 2029 prog_aux = env->prog->aux; 2030 tab = prog_aux->kfunc_tab; 2031 btf_tab = prog_aux->kfunc_btf_tab; 2032 if (!tab) { 2033 if (!btf_vmlinux) { 2034 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); 2035 return -ENOTSUPP; 2036 } 2037 2038 if (!env->prog->jit_requested) { 2039 verbose(env, "JIT is required for calling kernel function\n"); 2040 return -ENOTSUPP; 2041 } 2042 2043 if (!bpf_jit_supports_kfunc_call()) { 2044 verbose(env, "JIT does not support calling kernel function\n"); 2045 return -ENOTSUPP; 2046 } 2047 2048 if (!env->prog->gpl_compatible) { 2049 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); 2050 return -EINVAL; 2051 } 2052 2053 tab = kzalloc(sizeof(*tab), GFP_KERNEL); 2054 if (!tab) 2055 return -ENOMEM; 2056 prog_aux->kfunc_tab = tab; 2057 } 2058 2059 /* func_id == 0 is always invalid, but instead of returning an error, be 2060 * conservative and wait until the code elimination pass before returning 2061 * error, so that invalid calls that get pruned out can be in BPF programs 2062 * loaded from userspace. It is also required that offset be untouched 2063 * for such calls. 
2064 */ 2065 if (!func_id && !offset) 2066 return 0; 2067 2068 if (!btf_tab && offset) { 2069 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); 2070 if (!btf_tab) 2071 return -ENOMEM; 2072 prog_aux->kfunc_btf_tab = btf_tab; 2073 } 2074 2075 desc_btf = find_kfunc_desc_btf(env, offset); 2076 if (IS_ERR(desc_btf)) { 2077 verbose(env, "failed to find BTF for kernel function\n"); 2078 return PTR_ERR(desc_btf); 2079 } 2080 2081 if (find_kfunc_desc(env->prog, func_id, offset)) 2082 return 0; 2083 2084 if (tab->nr_descs == MAX_KFUNC_DESCS) { 2085 verbose(env, "too many different kernel function calls\n"); 2086 return -E2BIG; 2087 } 2088 2089 func = btf_type_by_id(desc_btf, func_id); 2090 if (!func || !btf_type_is_func(func)) { 2091 verbose(env, "kernel btf_id %u is not a function\n", 2092 func_id); 2093 return -EINVAL; 2094 } 2095 func_proto = btf_type_by_id(desc_btf, func->type); 2096 if (!func_proto || !btf_type_is_func_proto(func_proto)) { 2097 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", 2098 func_id); 2099 return -EINVAL; 2100 } 2101 2102 func_name = btf_name_by_offset(desc_btf, func->name_off); 2103 addr = kallsyms_lookup_name(func_name); 2104 if (!addr) { 2105 verbose(env, "cannot find address for kernel function %s\n", 2106 func_name); 2107 return -EINVAL; 2108 } 2109 2110 call_imm = BPF_CALL_IMM(addr); 2111 /* Check whether or not the relative offset overflows desc->imm */ 2112 if ((unsigned long)(s32)call_imm != call_imm) { 2113 verbose(env, "address of kernel function %s is out of range\n", 2114 func_name); 2115 return -EINVAL; 2116 } 2117 2118 desc = &tab->descs[tab->nr_descs++]; 2119 desc->func_id = func_id; 2120 desc->imm = call_imm; 2121 desc->offset = offset; 2122 err = btf_distill_func_proto(&env->log, desc_btf, 2123 func_proto, func_name, 2124 &desc->func_model); 2125 if (!err) 2126 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2127 kfunc_desc_cmp_by_id_off, NULL); 2128 return err; 2129 } 2130 2131 static int kfunc_desc_cmp_by_imm(const void *a, const void *b) 2132 { 2133 const struct bpf_kfunc_desc *d0 = a; 2134 const struct bpf_kfunc_desc *d1 = b; 2135 2136 if (d0->imm > d1->imm) 2137 return 1; 2138 else if (d0->imm < d1->imm) 2139 return -1; 2140 return 0; 2141 } 2142 2143 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog) 2144 { 2145 struct bpf_kfunc_desc_tab *tab; 2146 2147 tab = prog->aux->kfunc_tab; 2148 if (!tab) 2149 return; 2150 2151 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2152 kfunc_desc_cmp_by_imm, NULL); 2153 } 2154 2155 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 2156 { 2157 return !!prog->aux->kfunc_tab; 2158 } 2159 2160 const struct btf_func_model * 2161 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2162 const struct bpf_insn *insn) 2163 { 2164 const struct bpf_kfunc_desc desc = { 2165 .imm = insn->imm, 2166 }; 2167 const struct bpf_kfunc_desc *res; 2168 struct bpf_kfunc_desc_tab *tab; 2169 2170 tab = prog->aux->kfunc_tab; 2171 res = bsearch(&desc, tab->descs, tab->nr_descs, 2172 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm); 2173 2174 return res ? &res->func_model : NULL; 2175 } 2176 2177 static int add_subprog_and_kfunc(struct bpf_verifier_env *env) 2178 { 2179 struct bpf_subprog_info *subprog = env->subprog_info; 2180 struct bpf_insn *insn = env->prog->insnsi; 2181 int i, ret, insn_cnt = env->prog->len; 2182 2183 /* Add entry function. 
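 * The main program always becomes subprog 0; bpf-to-bpf call targets and
 * kfunc call descriptors are then collected in the loop below.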
*/ 2184 ret = add_subprog(env, 0); 2185 if (ret) 2186 return ret; 2187 2188 for (i = 0; i < insn_cnt; i++, insn++) { 2189 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && 2190 !bpf_pseudo_kfunc_call(insn)) 2191 continue; 2192 2193 if (!env->bpf_capable) { 2194 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); 2195 return -EPERM; 2196 } 2197 2198 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) 2199 ret = add_subprog(env, i + insn->imm + 1); 2200 else 2201 ret = add_kfunc_call(env, insn->imm, insn->off); 2202 2203 if (ret < 0) 2204 return ret; 2205 } 2206 2207 /* Add a fake 'exit' subprog which could simplify subprog iteration 2208 * logic. 'subprog_cnt' should not be increased. 2209 */ 2210 subprog[env->subprog_cnt].start = insn_cnt; 2211 2212 if (env->log.level & BPF_LOG_LEVEL2) 2213 for (i = 0; i < env->subprog_cnt; i++) 2214 verbose(env, "func#%d @%d\n", i, subprog[i].start); 2215 2216 return 0; 2217 } 2218 2219 static int check_subprogs(struct bpf_verifier_env *env) 2220 { 2221 int i, subprog_start, subprog_end, off, cur_subprog = 0; 2222 struct bpf_subprog_info *subprog = env->subprog_info; 2223 struct bpf_insn *insn = env->prog->insnsi; 2224 int insn_cnt = env->prog->len; 2225 2226 /* now check that all jumps are within the same subprog */ 2227 subprog_start = subprog[cur_subprog].start; 2228 subprog_end = subprog[cur_subprog + 1].start; 2229 for (i = 0; i < insn_cnt; i++) { 2230 u8 code = insn[i].code; 2231 2232 if (code == (BPF_JMP | BPF_CALL) && 2233 insn[i].imm == BPF_FUNC_tail_call && 2234 insn[i].src_reg != BPF_PSEUDO_CALL) 2235 subprog[cur_subprog].has_tail_call = true; 2236 if (BPF_CLASS(code) == BPF_LD && 2237 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) 2238 subprog[cur_subprog].has_ld_abs = true; 2239 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 2240 goto next; 2241 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 2242 goto next; 2243 off = i + insn[i].off + 1; 2244 if (off < subprog_start || off >= subprog_end) { 2245 verbose(env, "jump out of range from insn %d to %d\n", i, off); 2246 return -EINVAL; 2247 } 2248 next: 2249 if (i == subprog_end - 1) { 2250 /* to avoid fall-through from one subprog into another 2251 * the last insn of the subprog should be either exit 2252 * or unconditional jump back 2253 */ 2254 if (code != (BPF_JMP | BPF_EXIT) && 2255 code != (BPF_JMP | BPF_JA)) { 2256 verbose(env, "last insn is not an exit or jmp\n"); 2257 return -EINVAL; 2258 } 2259 subprog_start = subprog_end; 2260 cur_subprog++; 2261 if (cur_subprog < env->subprog_cnt) 2262 subprog_end = subprog[cur_subprog + 1].start; 2263 } 2264 } 2265 return 0; 2266 } 2267 2268 /* Parentage chain of this register (or stack slot) should take care of all 2269 * issues like callee-saved registers, stack slot allocation time, etc. 2270 */ 2271 static int mark_reg_read(struct bpf_verifier_env *env, 2272 const struct bpf_reg_state *state, 2273 struct bpf_reg_state *parent, u8 flag) 2274 { 2275 bool writes = parent == state->parent; /* Observe write marks */ 2276 int cnt = 0; 2277 2278 while (parent) { 2279 /* if read wasn't screened by an earlier write ... 
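 * (illustratively: when a register is read that was last written in an
 * ancestor state, the read mark is propagated up the parentage chain and
 * the walk stops once a state with the corresponding write mark is reached)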
*/ 2280 if (writes && state->live & REG_LIVE_WRITTEN) 2281 break; 2282 if (parent->live & REG_LIVE_DONE) { 2283 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 2284 reg_type_str(env, parent->type), 2285 parent->var_off.value, parent->off); 2286 return -EFAULT; 2287 } 2288 /* The first condition is more likely to be true than the 2289 * second, checked it first. 2290 */ 2291 if ((parent->live & REG_LIVE_READ) == flag || 2292 parent->live & REG_LIVE_READ64) 2293 /* The parentage chain never changes and 2294 * this parent was already marked as LIVE_READ. 2295 * There is no need to keep walking the chain again and 2296 * keep re-marking all parents as LIVE_READ. 2297 * This case happens when the same register is read 2298 * multiple times without writes into it in-between. 2299 * Also, if parent has the stronger REG_LIVE_READ64 set, 2300 * then no need to set the weak REG_LIVE_READ32. 2301 */ 2302 break; 2303 /* ... then we depend on parent's value */ 2304 parent->live |= flag; 2305 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 2306 if (flag == REG_LIVE_READ64) 2307 parent->live &= ~REG_LIVE_READ32; 2308 state = parent; 2309 parent = state->parent; 2310 writes = true; 2311 cnt++; 2312 } 2313 2314 if (env->longest_mark_read_walk < cnt) 2315 env->longest_mark_read_walk = cnt; 2316 return 0; 2317 } 2318 2319 /* This function is supposed to be used by the following 32-bit optimization 2320 * code only. It returns TRUE if the source or destination register operates 2321 * on 64-bit, otherwise return FALSE. 2322 */ 2323 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 2324 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 2325 { 2326 u8 code, class, op; 2327 2328 code = insn->code; 2329 class = BPF_CLASS(code); 2330 op = BPF_OP(code); 2331 if (class == BPF_JMP) { 2332 /* BPF_EXIT for "main" will reach here. Return TRUE 2333 * conservatively. 2334 */ 2335 if (op == BPF_EXIT) 2336 return true; 2337 if (op == BPF_CALL) { 2338 /* BPF to BPF call will reach here because of marking 2339 * caller saved clobber with DST_OP_NO_MARK for which we 2340 * don't care the register def because they are anyway 2341 * marked as NOT_INIT already. 2342 */ 2343 if (insn->src_reg == BPF_PSEUDO_CALL) 2344 return false; 2345 /* Helper call will reach here because of arg type 2346 * check, conservatively return TRUE. 2347 */ 2348 if (t == SRC_OP) 2349 return true; 2350 2351 return false; 2352 } 2353 } 2354 2355 if (class == BPF_ALU64 || class == BPF_JMP || 2356 /* BPF_END always use BPF_ALU class. */ 2357 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 2358 return true; 2359 2360 if (class == BPF_ALU || class == BPF_JMP32) 2361 return false; 2362 2363 if (class == BPF_LDX) { 2364 if (t != SRC_OP) 2365 return BPF_SIZE(code) == BPF_DW; 2366 /* LDX source must be ptr. */ 2367 return true; 2368 } 2369 2370 if (class == BPF_STX) { 2371 /* BPF_STX (including atomic variants) has multiple source 2372 * operands, one of which is a ptr. Check whether the caller is 2373 * asking about it. 2374 */ 2375 if (t == SRC_OP && reg->type != SCALAR_VALUE) 2376 return true; 2377 return BPF_SIZE(code) == BPF_DW; 2378 } 2379 2380 if (class == BPF_LD) { 2381 u8 mode = BPF_MODE(code); 2382 2383 /* LD_IMM64 */ 2384 if (mode == BPF_IMM) 2385 return true; 2386 2387 /* Both LD_IND and LD_ABS return 32-bit data. */ 2388 if (t != SRC_OP) 2389 return false; 2390 2391 /* Implicit ctx ptr. */ 2392 if (regno == BPF_REG_6) 2393 return true; 2394 2395 /* Explicit source could be any width. 
 */
2396 return true;
2397 }
2398 
2399 if (class == BPF_ST)
2400 /* The only source register for BPF_ST is a ptr. */
2401 return true;
2402 
2403 /* Conservatively return true by default. */
2404 return true;
2405 }
2406 
2407 /* Return the regno defined by the insn, or -1. */
2408 static int insn_def_regno(const struct bpf_insn *insn)
2409 {
2410 switch (BPF_CLASS(insn->code)) {
2411 case BPF_JMP:
2412 case BPF_JMP32:
2413 case BPF_ST:
2414 return -1;
2415 case BPF_STX:
2416 if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2417 (insn->imm & BPF_FETCH)) {
2418 if (insn->imm == BPF_CMPXCHG)
2419 return BPF_REG_0;
2420 else
2421 return insn->src_reg;
2422 } else {
2423 return -1;
2424 }
2425 default:
2426 return insn->dst_reg;
2427 }
2428 }
2429 
2430 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2431 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2432 {
2433 int dst_reg = insn_def_regno(insn);
2434 
2435 if (dst_reg == -1)
2436 return false;
2437 
2438 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2439 }
2440 
2441 static void mark_insn_zext(struct bpf_verifier_env *env,
2442 struct bpf_reg_state *reg)
2443 {
2444 s32 def_idx = reg->subreg_def;
2445 
2446 if (def_idx == DEF_NOT_SUBREG)
2447 return;
2448 
2449 env->insn_aux_data[def_idx - 1].zext_dst = true;
2450 /* The dst will be zero extended, so won't be sub-register anymore. */
2451 reg->subreg_def = DEF_NOT_SUBREG;
2452 }
2453 
2454 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2455 enum reg_arg_type t)
2456 {
2457 struct bpf_verifier_state *vstate = env->cur_state;
2458 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2459 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2460 struct bpf_reg_state *reg, *regs = state->regs;
2461 bool rw64;
2462 
2463 if (regno >= MAX_BPF_REG) {
2464 verbose(env, "R%d is invalid\n", regno);
2465 return -EINVAL;
2466 }
2467 
2468 mark_reg_scratched(env, regno);
2469 
2470 reg = &regs[regno];
2471 rw64 = is_reg64(env, insn, regno, reg, t);
2472 if (t == SRC_OP) {
2473 /* check whether register used as source operand can be read */
2474 if (reg->type == NOT_INIT) {
2475 verbose(env, "R%d !read_ok\n", regno);
2476 return -EACCES;
2477 }
2478 /* We don't need to worry about FP liveness because it's read-only */
2479 if (regno == BPF_REG_FP)
2480 return 0;
2481 
2482 if (rw64)
2483 mark_insn_zext(env, reg);
2484 
2485 return mark_reg_read(env, reg, reg->parent,
2486 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2487 } else {
2488 /* check whether register used as dest operand can be written to */
2489 if (regno == BPF_REG_FP) {
2490 verbose(env, "frame pointer is read only\n");
2491 return -EACCES;
2492 }
2493 reg->live |= REG_LIVE_WRITTEN;
2494 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2495 if (t == DST_OP)
2496 mark_reg_unknown(env, regs, regno);
2497 }
2498 return 0;
2499 }
2500 
2501 /* for any branch, call, exit record the history of jmps in the given state */
2502 static int push_jmp_history(struct bpf_verifier_env *env,
2503 struct bpf_verifier_state *cur)
2504 {
2505 u32 cnt = cur->jmp_history_cnt;
2506 struct bpf_idx_pair *p;
2507 
2508 cnt++;
2509 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2510 if (!p)
2511 return -ENOMEM;
2512 p[cnt - 1].idx = env->insn_idx;
2513 p[cnt - 1].prev_idx = env->prev_insn_idx;
2514 cur->jmp_history = p;
2515 cur->jmp_history_cnt = cnt;
2516 return 0;
2517 }
2518 
2519 /* Backtrack one insn at a time.
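 * Jump history entries are (idx, prev_idx) pairs recorded by
 * push_jmp_history() at every branch, call and exit.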
If idx is not at the top of recorded 2520 * history then previous instruction came from straight line execution. 2521 */ 2522 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, 2523 u32 *history) 2524 { 2525 u32 cnt = *history; 2526 2527 if (cnt && st->jmp_history[cnt - 1].idx == i) { 2528 i = st->jmp_history[cnt - 1].prev_idx; 2529 (*history)--; 2530 } else { 2531 i--; 2532 } 2533 return i; 2534 } 2535 2536 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) 2537 { 2538 const struct btf_type *func; 2539 struct btf *desc_btf; 2540 2541 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) 2542 return NULL; 2543 2544 desc_btf = find_kfunc_desc_btf(data, insn->off); 2545 if (IS_ERR(desc_btf)) 2546 return "<error>"; 2547 2548 func = btf_type_by_id(desc_btf, insn->imm); 2549 return btf_name_by_offset(desc_btf, func->name_off); 2550 } 2551 2552 /* For given verifier state backtrack_insn() is called from the last insn to 2553 * the first insn. Its purpose is to compute a bitmask of registers and 2554 * stack slots that needs precision in the parent verifier state. 2555 */ 2556 static int backtrack_insn(struct bpf_verifier_env *env, int idx, 2557 u32 *reg_mask, u64 *stack_mask) 2558 { 2559 const struct bpf_insn_cbs cbs = { 2560 .cb_call = disasm_kfunc_name, 2561 .cb_print = verbose, 2562 .private_data = env, 2563 }; 2564 struct bpf_insn *insn = env->prog->insnsi + idx; 2565 u8 class = BPF_CLASS(insn->code); 2566 u8 opcode = BPF_OP(insn->code); 2567 u8 mode = BPF_MODE(insn->code); 2568 u32 dreg = 1u << insn->dst_reg; 2569 u32 sreg = 1u << insn->src_reg; 2570 u32 spi; 2571 2572 if (insn->code == 0) 2573 return 0; 2574 if (env->log.level & BPF_LOG_LEVEL2) { 2575 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); 2576 verbose(env, "%d: ", idx); 2577 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 2578 } 2579 2580 if (class == BPF_ALU || class == BPF_ALU64) { 2581 if (!(*reg_mask & dreg)) 2582 return 0; 2583 if (opcode == BPF_MOV) { 2584 if (BPF_SRC(insn->code) == BPF_X) { 2585 /* dreg = sreg 2586 * dreg needs precision after this insn 2587 * sreg needs precision before this insn 2588 */ 2589 *reg_mask &= ~dreg; 2590 *reg_mask |= sreg; 2591 } else { 2592 /* dreg = K 2593 * dreg needs precision after this insn. 2594 * Corresponding register is already marked 2595 * as precise=true in this verifier state. 2596 * No further markings in parent are necessary 2597 */ 2598 *reg_mask &= ~dreg; 2599 } 2600 } else { 2601 if (BPF_SRC(insn->code) == BPF_X) { 2602 /* dreg += sreg 2603 * both dreg and sreg need precision 2604 * before this insn 2605 */ 2606 *reg_mask |= sreg; 2607 } /* else dreg += K 2608 * dreg still needs precision before this insn 2609 */ 2610 } 2611 } else if (class == BPF_LDX) { 2612 if (!(*reg_mask & dreg)) 2613 return 0; 2614 *reg_mask &= ~dreg; 2615 2616 /* scalars can only be spilled into stack w/o losing precision. 2617 * Load from any other memory can be zero extended. 2618 * The desire to keep that precision is already indicated 2619 * by 'precise' mark in corresponding register of this state. 2620 * No further tracking necessary. 2621 */ 2622 if (insn->src_reg != BPF_REG_FP) 2623 return 0; 2624 2625 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 
2626 * that [fp - off] slot contains scalar that needs to be 2627 * tracked with precision 2628 */ 2629 spi = (-insn->off - 1) / BPF_REG_SIZE; 2630 if (spi >= 64) { 2631 verbose(env, "BUG spi %d\n", spi); 2632 WARN_ONCE(1, "verifier backtracking bug"); 2633 return -EFAULT; 2634 } 2635 *stack_mask |= 1ull << spi; 2636 } else if (class == BPF_STX || class == BPF_ST) { 2637 if (*reg_mask & dreg) 2638 /* stx & st shouldn't be using _scalar_ dst_reg 2639 * to access memory. It means backtracking 2640 * encountered a case of pointer subtraction. 2641 */ 2642 return -ENOTSUPP; 2643 /* scalars can only be spilled into stack */ 2644 if (insn->dst_reg != BPF_REG_FP) 2645 return 0; 2646 spi = (-insn->off - 1) / BPF_REG_SIZE; 2647 if (spi >= 64) { 2648 verbose(env, "BUG spi %d\n", spi); 2649 WARN_ONCE(1, "verifier backtracking bug"); 2650 return -EFAULT; 2651 } 2652 if (!(*stack_mask & (1ull << spi))) 2653 return 0; 2654 *stack_mask &= ~(1ull << spi); 2655 if (class == BPF_STX) 2656 *reg_mask |= sreg; 2657 } else if (class == BPF_JMP || class == BPF_JMP32) { 2658 if (opcode == BPF_CALL) { 2659 if (insn->src_reg == BPF_PSEUDO_CALL) 2660 return -ENOTSUPP; 2661 /* regular helper call sets R0 */ 2662 *reg_mask &= ~1; 2663 if (*reg_mask & 0x3f) { 2664 /* if backtracing was looking for registers R1-R5 2665 * they should have been found already. 2666 */ 2667 verbose(env, "BUG regs %x\n", *reg_mask); 2668 WARN_ONCE(1, "verifier backtracking bug"); 2669 return -EFAULT; 2670 } 2671 } else if (opcode == BPF_EXIT) { 2672 return -ENOTSUPP; 2673 } 2674 } else if (class == BPF_LD) { 2675 if (!(*reg_mask & dreg)) 2676 return 0; 2677 *reg_mask &= ~dreg; 2678 /* It's ld_imm64 or ld_abs or ld_ind. 2679 * For ld_imm64 no further tracking of precision 2680 * into parent is necessary 2681 */ 2682 if (mode == BPF_IND || mode == BPF_ABS) 2683 /* to be analyzed */ 2684 return -ENOTSUPP; 2685 } 2686 return 0; 2687 } 2688 2689 /* the scalar precision tracking algorithm: 2690 * . at the start all registers have precise=false. 2691 * . scalar ranges are tracked as normal through alu and jmp insns. 2692 * . once precise value of the scalar register is used in: 2693 * . ptr + scalar alu 2694 * . if (scalar cond K|scalar) 2695 * . helper_call(.., scalar, ...) where ARG_CONST is expected 2696 * backtrack through the verifier states and mark all registers and 2697 * stack slots with spilled constants that these scalar regisers 2698 * should be precise. 2699 * . during state pruning two registers (or spilled stack slots) 2700 * are equivalent if both are not precise. 2701 * 2702 * Note the verifier cannot simply walk register parentage chain, 2703 * since many different registers and stack slots could have been 2704 * used to compute single precise scalar. 2705 * 2706 * The approach of starting with precise=true for all registers and then 2707 * backtrack to mark a register as not precise when the verifier detects 2708 * that program doesn't care about specific value (e.g., when helper 2709 * takes register as ARG_ANYTHING parameter) is not safe. 2710 * 2711 * It's ok to walk single parentage chain of the verifier states. 2712 * It's possible that this backtracking will go all the way till 1st insn. 2713 * All other branches will be explored for needing precision later. 
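 *
 * A minimal illustrative sketch (not from any particular program) of when
 * precision gets requested:
 *   r2 = 8                  // known constant scalar
 *   r1 = r10
 *   r1 += r2                // ptr + scalar alu: r2 must now be precise
 * Backtracking then walks from the alu insn back to 'r2 = 8' and marks r2
 * (and anything it was derived from) as precise in each state on the path.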
2714 * 2715 * The backtracking needs to deal with cases like: 2716 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 2717 * r9 -= r8 2718 * r5 = r9 2719 * if r5 > 0x79f goto pc+7 2720 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 2721 * r5 += 1 2722 * ... 2723 * call bpf_perf_event_output#25 2724 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 2725 * 2726 * and this case: 2727 * r6 = 1 2728 * call foo // uses callee's r6 inside to compute r0 2729 * r0 += r6 2730 * if r0 == 0 goto 2731 * 2732 * to track above reg_mask/stack_mask needs to be independent for each frame. 2733 * 2734 * Also if parent's curframe > frame where backtracking started, 2735 * the verifier need to mark registers in both frames, otherwise callees 2736 * may incorrectly prune callers. This is similar to 2737 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 2738 * 2739 * For now backtracking falls back into conservative marking. 2740 */ 2741 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 2742 struct bpf_verifier_state *st) 2743 { 2744 struct bpf_func_state *func; 2745 struct bpf_reg_state *reg; 2746 int i, j; 2747 2748 /* big hammer: mark all scalars precise in this path. 2749 * pop_stack may still get !precise scalars. 2750 */ 2751 for (; st; st = st->parent) 2752 for (i = 0; i <= st->curframe; i++) { 2753 func = st->frame[i]; 2754 for (j = 0; j < BPF_REG_FP; j++) { 2755 reg = &func->regs[j]; 2756 if (reg->type != SCALAR_VALUE) 2757 continue; 2758 reg->precise = true; 2759 } 2760 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 2761 if (!is_spilled_reg(&func->stack[j])) 2762 continue; 2763 reg = &func->stack[j].spilled_ptr; 2764 if (reg->type != SCALAR_VALUE) 2765 continue; 2766 reg->precise = true; 2767 } 2768 } 2769 } 2770 2771 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, 2772 int spi) 2773 { 2774 struct bpf_verifier_state *st = env->cur_state; 2775 int first_idx = st->first_insn_idx; 2776 int last_idx = env->insn_idx; 2777 struct bpf_func_state *func; 2778 struct bpf_reg_state *reg; 2779 u32 reg_mask = regno >= 0 ? 1u << regno : 0; 2780 u64 stack_mask = spi >= 0 ? 
1ull << spi : 0;
2781 bool skip_first = true;
2782 bool new_marks = false;
2783 int i, err;
2784 
2785 if (!env->bpf_capable)
2786 return 0;
2787 
2788 func = st->frame[st->curframe];
2789 if (regno >= 0) {
2790 reg = &func->regs[regno];
2791 if (reg->type != SCALAR_VALUE) {
2792 WARN_ONCE(1, "backtracing misuse");
2793 return -EFAULT;
2794 }
2795 if (!reg->precise)
2796 new_marks = true;
2797 else
2798 reg_mask = 0;
2799 reg->precise = true;
2800 }
2801 
2802 while (spi >= 0) {
2803 if (!is_spilled_reg(&func->stack[spi])) {
2804 stack_mask = 0;
2805 break;
2806 }
2807 reg = &func->stack[spi].spilled_ptr;
2808 if (reg->type != SCALAR_VALUE) {
2809 stack_mask = 0;
2810 break;
2811 }
2812 if (!reg->precise)
2813 new_marks = true;
2814 else
2815 stack_mask = 0;
2816 reg->precise = true;
2817 break;
2818 }
2819 
2820 if (!new_marks)
2821 return 0;
2822 if (!reg_mask && !stack_mask)
2823 return 0;
2824 for (;;) {
2825 DECLARE_BITMAP(mask, 64);
2826 u32 history = st->jmp_history_cnt;
2827 
2828 if (env->log.level & BPF_LOG_LEVEL2)
2829 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2830 for (i = last_idx;;) {
2831 if (skip_first) {
2832 err = 0;
2833 skip_first = false;
2834 } else {
2835 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2836 }
2837 if (err == -ENOTSUPP) {
2838 mark_all_scalars_precise(env, st);
2839 return 0;
2840 } else if (err) {
2841 return err;
2842 }
2843 if (!reg_mask && !stack_mask)
2844 /* Found assignment(s) into tracked register in this state.
2845 * Since this state is already marked, just return.
2846 * Nothing to be tracked further in the parent state.
2847 */
2848 return 0;
2849 if (i == first_idx)
2850 break;
2851 i = get_prev_insn_idx(st, i, &history);
2852 if (i >= env->prog->len) {
2853 /* This can happen if backtracking reached insn 0
2854 * and there are still reg_mask or stack_mask
2855 * to backtrack.
2856 * It means the backtracking missed the spot where
2857 * a particular register was initialized with a constant.
2858 */
2859 verbose(env, "BUG backtracking idx %d\n", i);
2860 WARN_ONCE(1, "verifier backtracking bug");
2861 return -EFAULT;
2862 }
2863 }
2864 st = st->parent;
2865 if (!st)
2866 break;
2867 
2868 new_marks = false;
2869 func = st->frame[st->curframe];
2870 bitmap_from_u64(mask, reg_mask);
2871 for_each_set_bit(i, mask, 32) {
2872 reg = &func->regs[i];
2873 if (reg->type != SCALAR_VALUE) {
2874 reg_mask &= ~(1u << i);
2875 continue;
2876 }
2877 if (!reg->precise)
2878 new_marks = true;
2879 reg->precise = true;
2880 }
2881 
2882 bitmap_from_u64(mask, stack_mask);
2883 for_each_set_bit(i, mask, 64) {
2884 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2885 /* the sequence of instructions:
2886 * 2: (bf) r3 = r10
2887 * 3: (7b) *(u64 *)(r3 -8) = r0
2888 * 4: (79) r4 = *(u64 *)(r10 -8)
2889 * doesn't contain jmps. It's backtracked
2890 * as a single block.
2891 * During backtracking insn 3 is not recognized as
2892 * stack access, so at the end of backtracking
2893 * stack slot fp-8 is still marked in stack_mask.
2894 * However the parent state may not have accessed
2895 * fp-8 and it's "unallocated" stack space.
2896 * In such a case, fall back to conservative marking.
2897 */ 2898 mark_all_scalars_precise(env, st); 2899 return 0; 2900 } 2901 2902 if (!is_spilled_reg(&func->stack[i])) { 2903 stack_mask &= ~(1ull << i); 2904 continue; 2905 } 2906 reg = &func->stack[i].spilled_ptr; 2907 if (reg->type != SCALAR_VALUE) { 2908 stack_mask &= ~(1ull << i); 2909 continue; 2910 } 2911 if (!reg->precise) 2912 new_marks = true; 2913 reg->precise = true; 2914 } 2915 if (env->log.level & BPF_LOG_LEVEL2) { 2916 verbose(env, "parent %s regs=%x stack=%llx marks:", 2917 new_marks ? "didn't have" : "already had", 2918 reg_mask, stack_mask); 2919 print_verifier_state(env, func, true); 2920 } 2921 2922 if (!reg_mask && !stack_mask) 2923 break; 2924 if (!new_marks) 2925 break; 2926 2927 last_idx = st->last_insn_idx; 2928 first_idx = st->first_insn_idx; 2929 } 2930 return 0; 2931 } 2932 2933 int mark_chain_precision(struct bpf_verifier_env *env, int regno) 2934 { 2935 return __mark_chain_precision(env, regno, -1); 2936 } 2937 2938 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 2939 { 2940 return __mark_chain_precision(env, -1, spi); 2941 } 2942 2943 static bool is_spillable_regtype(enum bpf_reg_type type) 2944 { 2945 switch (base_type(type)) { 2946 case PTR_TO_MAP_VALUE: 2947 case PTR_TO_STACK: 2948 case PTR_TO_CTX: 2949 case PTR_TO_PACKET: 2950 case PTR_TO_PACKET_META: 2951 case PTR_TO_PACKET_END: 2952 case PTR_TO_FLOW_KEYS: 2953 case CONST_PTR_TO_MAP: 2954 case PTR_TO_SOCKET: 2955 case PTR_TO_SOCK_COMMON: 2956 case PTR_TO_TCP_SOCK: 2957 case PTR_TO_XDP_SOCK: 2958 case PTR_TO_BTF_ID: 2959 case PTR_TO_BUF: 2960 case PTR_TO_MEM: 2961 case PTR_TO_FUNC: 2962 case PTR_TO_MAP_KEY: 2963 return true; 2964 default: 2965 return false; 2966 } 2967 } 2968 2969 /* Does this register contain a constant zero? */ 2970 static bool register_is_null(struct bpf_reg_state *reg) 2971 { 2972 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 2973 } 2974 2975 static bool register_is_const(struct bpf_reg_state *reg) 2976 { 2977 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 2978 } 2979 2980 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) 2981 { 2982 return tnum_is_unknown(reg->var_off) && 2983 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && 2984 reg->umin_value == 0 && reg->umax_value == U64_MAX && 2985 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && 2986 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; 2987 } 2988 2989 static bool register_is_bounded(struct bpf_reg_state *reg) 2990 { 2991 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); 2992 } 2993 2994 static bool __is_pointer_value(bool allow_ptr_leaks, 2995 const struct bpf_reg_state *reg) 2996 { 2997 if (allow_ptr_leaks) 2998 return false; 2999 3000 return reg->type != SCALAR_VALUE; 3001 } 3002 3003 static void save_register_state(struct bpf_func_state *state, 3004 int spi, struct bpf_reg_state *reg, 3005 int size) 3006 { 3007 int i; 3008 3009 state->stack[spi].spilled_ptr = *reg; 3010 if (size == BPF_REG_SIZE) 3011 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 3012 3013 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) 3014 state->stack[spi].slot_type[i - 1] = STACK_SPILL; 3015 3016 /* size < 8 bytes spill */ 3017 for (; i; i--) 3018 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); 3019 } 3020 3021 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, 3022 * stack boundary and alignment are checked in check_mem_access() 3023 */ 3024 static int 
check_stack_write_fixed_off(struct bpf_verifier_env *env, 3025 /* stack frame we're writing to */ 3026 struct bpf_func_state *state, 3027 int off, int size, int value_regno, 3028 int insn_idx) 3029 { 3030 struct bpf_func_state *cur; /* state of the current function */ 3031 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 3032 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; 3033 struct bpf_reg_state *reg = NULL; 3034 3035 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); 3036 if (err) 3037 return err; 3038 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 3039 * so it's aligned access and [off, off + size) are within stack limits 3040 */ 3041 if (!env->allow_ptr_leaks && 3042 state->stack[spi].slot_type[0] == STACK_SPILL && 3043 size != BPF_REG_SIZE) { 3044 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 3045 return -EACCES; 3046 } 3047 3048 cur = env->cur_state->frame[env->cur_state->curframe]; 3049 if (value_regno >= 0) 3050 reg = &cur->regs[value_regno]; 3051 if (!env->bypass_spec_v4) { 3052 bool sanitize = reg && is_spillable_regtype(reg->type); 3053 3054 for (i = 0; i < size; i++) { 3055 if (state->stack[spi].slot_type[i] == STACK_INVALID) { 3056 sanitize = true; 3057 break; 3058 } 3059 } 3060 3061 if (sanitize) 3062 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; 3063 } 3064 3065 mark_stack_slot_scratched(env, spi); 3066 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && 3067 !register_is_null(reg) && env->bpf_capable) { 3068 if (dst_reg != BPF_REG_FP) { 3069 /* The backtracking logic can only recognize explicit 3070 * stack slot address like [fp - 8]. Other spill of 3071 * scalar via different register has to be conservative. 3072 * Backtrack from here and mark all registers as precise 3073 * that contributed into 'reg' being a constant. 3074 */ 3075 err = mark_chain_precision(env, value_regno); 3076 if (err) 3077 return err; 3078 } 3079 save_register_state(state, spi, reg, size); 3080 } else if (reg && is_spillable_regtype(reg->type)) { 3081 /* register containing pointer is being spilled into stack */ 3082 if (size != BPF_REG_SIZE) { 3083 verbose_linfo(env, insn_idx, "; "); 3084 verbose(env, "invalid size of register spill\n"); 3085 return -EACCES; 3086 } 3087 if (state != cur && reg->type == PTR_TO_STACK) { 3088 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 3089 return -EINVAL; 3090 } 3091 save_register_state(state, spi, reg, size); 3092 } else { 3093 u8 type = STACK_MISC; 3094 3095 /* regular write of data into stack destroys any spilled ptr */ 3096 state->stack[spi].spilled_ptr.type = NOT_INIT; 3097 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 3098 if (is_spilled_reg(&state->stack[spi])) 3099 for (i = 0; i < BPF_REG_SIZE; i++) 3100 scrub_spilled_slot(&state->stack[spi].slot_type[i]); 3101 3102 /* only mark the slot as written if all 8 bytes were written 3103 * otherwise read propagation may incorrectly stop too soon 3104 * when stack slots are partially written. 3105 * This heuristic means that read propagation will be 3106 * conservative, since it will add reg_live_read marks 3107 * to stack slots all the way to first state when programs 3108 * writes+reads less than 8 bytes 3109 */ 3110 if (size == BPF_REG_SIZE) 3111 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 3112 3113 /* when we zero initialize stack slots mark them as such */ 3114 if (reg && register_is_null(reg)) { 3115 /* backtracking doesn't work for STACK_ZERO yet. 
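 * Force the constant source register to be precise here instead.
 * E.g. (illustrative):
 *   r0 = 0
 *   *(u64 *)(r10 - 8) = r0
 * marks the eight bytes at fp-8 as STACK_ZERO rather than as a spill.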
*/ 3116 err = mark_chain_precision(env, value_regno); 3117 if (err) 3118 return err; 3119 type = STACK_ZERO; 3120 } 3121 3122 /* Mark slots affected by this stack write. */ 3123 for (i = 0; i < size; i++) 3124 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 3125 type; 3126 } 3127 return 0; 3128 } 3129 3130 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is 3131 * known to contain a variable offset. 3132 * This function checks whether the write is permitted and conservatively 3133 * tracks the effects of the write, considering that each stack slot in the 3134 * dynamic range is potentially written to. 3135 * 3136 * 'off' includes 'regno->off'. 3137 * 'value_regno' can be -1, meaning that an unknown value is being written to 3138 * the stack. 3139 * 3140 * Spilled pointers in range are not marked as written because we don't know 3141 * what's going to be actually written. This means that read propagation for 3142 * future reads cannot be terminated by this write. 3143 * 3144 * For privileged programs, uninitialized stack slots are considered 3145 * initialized by this write (even though we don't know exactly what offsets 3146 * are going to be written to). The idea is that we don't want the verifier to 3147 * reject future reads that access slots written to through variable offsets. 3148 */ 3149 static int check_stack_write_var_off(struct bpf_verifier_env *env, 3150 /* func where register points to */ 3151 struct bpf_func_state *state, 3152 int ptr_regno, int off, int size, 3153 int value_regno, int insn_idx) 3154 { 3155 struct bpf_func_state *cur; /* state of the current function */ 3156 int min_off, max_off; 3157 int i, err; 3158 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL; 3159 bool writing_zero = false; 3160 /* set if the fact that we're writing a zero is used to let any 3161 * stack slots remain STACK_ZERO 3162 */ 3163 bool zero_used = false; 3164 3165 cur = env->cur_state->frame[env->cur_state->curframe]; 3166 ptr_reg = &cur->regs[ptr_regno]; 3167 min_off = ptr_reg->smin_value + off; 3168 max_off = ptr_reg->smax_value + off + size; 3169 if (value_regno >= 0) 3170 value_reg = &cur->regs[value_regno]; 3171 if (value_reg && register_is_null(value_reg)) 3172 writing_zero = true; 3173 3174 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); 3175 if (err) 3176 return err; 3177 3178 3179 /* Variable offset writes destroy any spilled pointers in range. */ 3180 for (i = min_off; i < max_off; i++) { 3181 u8 new_type, *stype; 3182 int slot, spi; 3183 3184 slot = -i - 1; 3185 spi = slot / BPF_REG_SIZE; 3186 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 3187 mark_stack_slot_scratched(env, spi); 3188 3189 if (!env->allow_ptr_leaks 3190 && *stype != NOT_INIT 3191 && *stype != SCALAR_VALUE) { 3192 /* Reject the write if there's are spilled pointers in 3193 * range. If we didn't reject here, the ptr status 3194 * would be erased below (even though not all slots are 3195 * actually overwritten), possibly opening the door to 3196 * leaks. 3197 */ 3198 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", 3199 insn_idx, i); 3200 return -EINVAL; 3201 } 3202 3203 /* Erase all spilled pointers. */ 3204 state->stack[spi].spilled_ptr.type = NOT_INIT; 3205 3206 /* Update the slot type. 
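 * A slot keeps STACK_ZERO only when a zero is being written and the slot
 * already was STACK_ZERO; every other touched slot becomes STACK_MISC.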
*/ 3207 new_type = STACK_MISC; 3208 if (writing_zero && *stype == STACK_ZERO) { 3209 new_type = STACK_ZERO; 3210 zero_used = true; 3211 } 3212 /* If the slot is STACK_INVALID, we check whether it's OK to 3213 * pretend that it will be initialized by this write. The slot 3214 * might not actually be written to, and so if we mark it as 3215 * initialized future reads might leak uninitialized memory. 3216 * For privileged programs, we will accept such reads to slots 3217 * that may or may not be written because, if we're reject 3218 * them, the error would be too confusing. 3219 */ 3220 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { 3221 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", 3222 insn_idx, i); 3223 return -EINVAL; 3224 } 3225 *stype = new_type; 3226 } 3227 if (zero_used) { 3228 /* backtracking doesn't work for STACK_ZERO yet. */ 3229 err = mark_chain_precision(env, value_regno); 3230 if (err) 3231 return err; 3232 } 3233 return 0; 3234 } 3235 3236 /* When register 'dst_regno' is assigned some values from stack[min_off, 3237 * max_off), we set the register's type according to the types of the 3238 * respective stack slots. If all the stack values are known to be zeros, then 3239 * so is the destination reg. Otherwise, the register is considered to be 3240 * SCALAR. This function does not deal with register filling; the caller must 3241 * ensure that all spilled registers in the stack range have been marked as 3242 * read. 3243 */ 3244 static void mark_reg_stack_read(struct bpf_verifier_env *env, 3245 /* func where src register points to */ 3246 struct bpf_func_state *ptr_state, 3247 int min_off, int max_off, int dst_regno) 3248 { 3249 struct bpf_verifier_state *vstate = env->cur_state; 3250 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3251 int i, slot, spi; 3252 u8 *stype; 3253 int zeros = 0; 3254 3255 for (i = min_off; i < max_off; i++) { 3256 slot = -i - 1; 3257 spi = slot / BPF_REG_SIZE; 3258 stype = ptr_state->stack[spi].slot_type; 3259 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) 3260 break; 3261 zeros++; 3262 } 3263 if (zeros == max_off - min_off) { 3264 /* any access_size read into register is zero extended, 3265 * so the whole register == const_zero 3266 */ 3267 __mark_reg_const_zero(&state->regs[dst_regno]); 3268 /* backtracking doesn't support STACK_ZERO yet, 3269 * so mark it precise here, so that later 3270 * backtracking can stop here. 3271 * Backtracking may not need this if this register 3272 * doesn't participate in pointer adjustment. 3273 * Forward propagation of precise flag is not 3274 * necessary either. This mark is only to stop 3275 * backtracking. Any register that contributed 3276 * to const 0 was marked precise before spill. 3277 */ 3278 state->regs[dst_regno].precise = true; 3279 } else { 3280 /* have read misc data from the stack */ 3281 mark_reg_unknown(env, state->regs, dst_regno); 3282 } 3283 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 3284 } 3285 3286 /* Read the stack at 'off' and put the results into the register indicated by 3287 * 'dst_regno'. It handles reg filling if the addressed stack slot is a 3288 * spilled reg. 3289 * 3290 * 'dst_regno' can be -1, meaning that the read value is not going to a 3291 * register. 3292 * 3293 * The access is assumed to be within the current stack bounds. 
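 *
 * E.g. (illustrative): after a full 8-byte spill of a pointer to fp-8,
 * 'r1 = *(u64 *)(r10 - 8)' restores the spilled register state into r1,
 * while a narrower read of that slot is only accepted for spilled scalars.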
3294 */
3295 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3296 /* func where src register points to */
3297 struct bpf_func_state *reg_state,
3298 int off, int size, int dst_regno)
3299 {
3300 struct bpf_verifier_state *vstate = env->cur_state;
3301 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3302 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3303 struct bpf_reg_state *reg;
3304 u8 *stype, type;
3305 
3306 stype = reg_state->stack[spi].slot_type;
3307 reg = &reg_state->stack[spi].spilled_ptr;
3308 
3309 if (is_spilled_reg(&reg_state->stack[spi])) {
3310 u8 spill_size = 1;
3311 
3312 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3313 spill_size++;
3314 
3315 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3316 if (reg->type != SCALAR_VALUE) {
3317 verbose_linfo(env, env->insn_idx, "; ");
3318 verbose(env, "invalid size of register fill\n");
3319 return -EACCES;
3320 }
3321 
3322 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3323 if (dst_regno < 0)
3324 return 0;
3325 
3326 if (!(off % BPF_REG_SIZE) && size == spill_size) {
3327 /* The earlier check_reg_arg() has decided the
3328 * subreg_def for this insn. Save it first.
3329 */
3330 s32 subreg_def = state->regs[dst_regno].subreg_def;
3331 
3332 state->regs[dst_regno] = *reg;
3333 state->regs[dst_regno].subreg_def = subreg_def;
3334 } else {
3335 for (i = 0; i < size; i++) {
3336 type = stype[(slot - i) % BPF_REG_SIZE];
3337 if (type == STACK_SPILL)
3338 continue;
3339 if (type == STACK_MISC)
3340 continue;
3341 verbose(env, "invalid read from stack off %d+%d size %d\n",
3342 off, i, size);
3343 return -EACCES;
3344 }
3345 mark_reg_unknown(env, state->regs, dst_regno);
3346 }
3347 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3348 return 0;
3349 }
3350 
3351 if (dst_regno >= 0) {
3352 /* restore register state from stack */
3353 state->regs[dst_regno] = *reg;
3354 /* mark reg as written since spilled pointer state likely
3355 * has its liveness marks cleared by is_state_visited()
3356 * which resets stack/reg liveness for state transitions
3357 */
3358 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3359 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3360 /* If dst_regno==-1, the caller is asking us whether
3361 * it is acceptable to use this value as a SCALAR_VALUE
3362 * (e.g. for XADD).
3363 * We must not allow unprivileged callers to do that
3364 * with spilled pointers.
3365 */ 3366 verbose(env, "leaking pointer from stack off %d\n", 3367 off); 3368 return -EACCES; 3369 } 3370 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 3371 } else { 3372 for (i = 0; i < size; i++) { 3373 type = stype[(slot - i) % BPF_REG_SIZE]; 3374 if (type == STACK_MISC) 3375 continue; 3376 if (type == STACK_ZERO) 3377 continue; 3378 verbose(env, "invalid read from stack off %d+%d size %d\n", 3379 off, i, size); 3380 return -EACCES; 3381 } 3382 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 3383 if (dst_regno >= 0) 3384 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); 3385 } 3386 return 0; 3387 } 3388 3389 enum bpf_access_src { 3390 ACCESS_DIRECT = 1, /* the access is performed by an instruction */ 3391 ACCESS_HELPER = 2, /* the access is performed by a helper */ 3392 }; 3393 3394 static int check_stack_range_initialized(struct bpf_verifier_env *env, 3395 int regno, int off, int access_size, 3396 bool zero_size_allowed, 3397 enum bpf_access_src type, 3398 struct bpf_call_arg_meta *meta); 3399 3400 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 3401 { 3402 return cur_regs(env) + regno; 3403 } 3404 3405 /* Read the stack at 'ptr_regno + off' and put the result into the register 3406 * 'dst_regno'. 3407 * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'), 3408 * but not its variable offset. 3409 * 'size' is assumed to be <= reg size and the access is assumed to be aligned. 3410 * 3411 * As opposed to check_stack_read_fixed_off, this function doesn't deal with 3412 * filling registers (i.e. reads of spilled register cannot be detected when 3413 * the offset is not fixed). We conservatively mark 'dst_regno' as containing 3414 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable 3415 * offset; for a fixed offset check_stack_read_fixed_off should be used 3416 * instead. 3417 */ 3418 static int check_stack_read_var_off(struct bpf_verifier_env *env, 3419 int ptr_regno, int off, int size, int dst_regno) 3420 { 3421 /* The state of the source register. */ 3422 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3423 struct bpf_func_state *ptr_state = func(env, reg); 3424 int err; 3425 int min_off, max_off; 3426 3427 /* Note that we pass a NULL meta, so raw access will not be permitted. 3428 */ 3429 err = check_stack_range_initialized(env, ptr_regno, off, size, 3430 false, ACCESS_DIRECT, NULL); 3431 if (err) 3432 return err; 3433 3434 min_off = reg->smin_value + off; 3435 max_off = reg->smax_value + off; 3436 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); 3437 return 0; 3438 } 3439 3440 /* check_stack_read dispatches to check_stack_read_fixed_off or 3441 * check_stack_read_var_off. 3442 * 3443 * The caller must ensure that the offset falls within the allocated stack 3444 * bounds. 3445 * 3446 * 'dst_regno' is a register which will receive the value from the stack. It 3447 * can be -1, meaning that the read value is not going to a register. 3448 */ 3449 static int check_stack_read(struct bpf_verifier_env *env, 3450 int ptr_regno, int off, int size, 3451 int dst_regno) 3452 { 3453 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3454 struct bpf_func_state *state = func(env, reg); 3455 int err; 3456 /* Some accesses are only permitted with a static offset. 
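 * E.g. (illustrative): 'r2 = r10; r2 += r1' with a non-constant scalar r1
 * makes a later '*(u64 *)(r2 - 8)' a variable-offset stack access.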
 */
3457 bool var_off = !tnum_is_const(reg->var_off);
3458 
3459 /* The offset is required to be static when reads don't go to a
3460 * register, in order to not leak pointers (see
3461 * check_stack_read_fixed_off).
3462 */
3463 if (dst_regno < 0 && var_off) {
3464 char tn_buf[48];
3465 
3466 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3467 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3468 tn_buf, off, size);
3469 return -EACCES;
3470 }
3471 /* Variable offset is prohibited for unprivileged mode for simplicity
3472 * since it requires corresponding support in Spectre masking for stack
3473 * ALU. See also retrieve_ptr_limit().
3474 */
3475 if (!env->bypass_spec_v1 && var_off) {
3476 char tn_buf[48];
3477 
3478 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3479 verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3480 ptr_regno, tn_buf);
3481 return -EACCES;
3482 }
3483 
3484 if (!var_off) {
3485 off += reg->var_off.value;
3486 err = check_stack_read_fixed_off(env, state, off, size,
3487 dst_regno);
3488 } else {
3489 /* Variable offset stack reads need more conservative handling
3490 * than fixed offset ones. Note that dst_regno >= 0 on this
3491 * branch.
3492 */
3493 err = check_stack_read_var_off(env, ptr_regno, off, size,
3494 dst_regno);
3495 }
3496 return err;
3497 }
3498 
3499 
3500 /* check_stack_write dispatches to check_stack_write_fixed_off or
3501 * check_stack_write_var_off.
3502 *
3503 * 'ptr_regno' is the register used as a pointer into the stack.
3504 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3505 * 'value_regno' is the register whose value we're writing to the stack. It can
3506 * be -1, meaning that we're not writing from a register.
3507 *
3508 * The caller must ensure that the offset falls within the maximum stack size.
3509 */
3510 static int check_stack_write(struct bpf_verifier_env *env,
3511 int ptr_regno, int off, int size,
3512 int value_regno, int insn_idx)
3513 {
3514 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3515 struct bpf_func_state *state = func(env, reg);
3516 int err;
3517 
3518 if (tnum_is_const(reg->var_off)) {
3519 off += reg->var_off.value;
3520 err = check_stack_write_fixed_off(env, state, off, size,
3521 value_regno, insn_idx);
3522 } else {
3523 /* Variable offset stack writes need more conservative handling
3524 * than fixed offset ones.
3525 */ 3526 err = check_stack_write_var_off(env, state, 3527 ptr_regno, off, size, 3528 value_regno, insn_idx); 3529 } 3530 return err; 3531 } 3532 3533 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 3534 int off, int size, enum bpf_access_type type) 3535 { 3536 struct bpf_reg_state *regs = cur_regs(env); 3537 struct bpf_map *map = regs[regno].map_ptr; 3538 u32 cap = bpf_map_flags_to_cap(map); 3539 3540 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 3541 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 3542 map->value_size, off, size); 3543 return -EACCES; 3544 } 3545 3546 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 3547 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 3548 map->value_size, off, size); 3549 return -EACCES; 3550 } 3551 3552 return 0; 3553 } 3554 3555 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 3556 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 3557 int off, int size, u32 mem_size, 3558 bool zero_size_allowed) 3559 { 3560 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 3561 struct bpf_reg_state *reg; 3562 3563 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 3564 return 0; 3565 3566 reg = &cur_regs(env)[regno]; 3567 switch (reg->type) { 3568 case PTR_TO_MAP_KEY: 3569 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", 3570 mem_size, off, size); 3571 break; 3572 case PTR_TO_MAP_VALUE: 3573 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 3574 mem_size, off, size); 3575 break; 3576 case PTR_TO_PACKET: 3577 case PTR_TO_PACKET_META: 3578 case PTR_TO_PACKET_END: 3579 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 3580 off, size, regno, reg->id, off, mem_size); 3581 break; 3582 case PTR_TO_MEM: 3583 default: 3584 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 3585 mem_size, off, size); 3586 } 3587 3588 return -EACCES; 3589 } 3590 3591 /* check read/write into a memory region with possible variable offset */ 3592 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 3593 int off, int size, u32 mem_size, 3594 bool zero_size_allowed) 3595 { 3596 struct bpf_verifier_state *vstate = env->cur_state; 3597 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3598 struct bpf_reg_state *reg = &state->regs[regno]; 3599 int err; 3600 3601 /* We may have adjusted the register pointing to memory region, so we 3602 * need to try adding each of min_value and max_value to off 3603 * to make sure our theoretical access will be safe. 3604 * 3605 * The minimum value is only important with signed 3606 * comparisons where we can't assume the floor of a 3607 * value is 0. If we are using signed variables for our 3608 * index'es we need to make sure that whatever we use 3609 * will have a set floor within our range. 
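 *
 * E.g. (illustrative): for a 64-byte region, a register bounded to [0, 56]
 * passes an 8-byte access because both the smin-based and the umax-based
 * offsets checked below stay inside [0, 64).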
3610 */ 3611 if (reg->smin_value < 0 && 3612 (reg->smin_value == S64_MIN || 3613 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 3614 reg->smin_value + off < 0)) { 3615 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 3616 regno); 3617 return -EACCES; 3618 } 3619 err = __check_mem_access(env, regno, reg->smin_value + off, size, 3620 mem_size, zero_size_allowed); 3621 if (err) { 3622 verbose(env, "R%d min value is outside of the allowed memory range\n", 3623 regno); 3624 return err; 3625 } 3626 3627 /* If we haven't set a max value then we need to bail since we can't be 3628 * sure we won't do bad things. 3629 * If reg->umax_value + off could overflow, treat that as unbounded too. 3630 */ 3631 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 3632 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 3633 regno); 3634 return -EACCES; 3635 } 3636 err = __check_mem_access(env, regno, reg->umax_value + off, size, 3637 mem_size, zero_size_allowed); 3638 if (err) { 3639 verbose(env, "R%d max value is outside of the allowed memory range\n", 3640 regno); 3641 return err; 3642 } 3643 3644 return 0; 3645 } 3646 3647 static int __check_ptr_off_reg(struct bpf_verifier_env *env, 3648 const struct bpf_reg_state *reg, int regno, 3649 bool fixed_off_ok) 3650 { 3651 /* Access to this pointer-typed register or passing it to a helper 3652 * is only allowed in its original, unmodified form. 3653 */ 3654 3655 if (reg->off < 0) { 3656 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n", 3657 reg_type_str(env, reg->type), regno, reg->off); 3658 return -EACCES; 3659 } 3660 3661 if (!fixed_off_ok && reg->off) { 3662 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", 3663 reg_type_str(env, reg->type), regno, reg->off); 3664 return -EACCES; 3665 } 3666 3667 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3668 char tn_buf[48]; 3669 3670 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3671 verbose(env, "variable %s access var_off=%s disallowed\n", 3672 reg_type_str(env, reg->type), tn_buf); 3673 return -EACCES; 3674 } 3675 3676 return 0; 3677 } 3678 3679 int check_ptr_off_reg(struct bpf_verifier_env *env, 3680 const struct bpf_reg_state *reg, int regno) 3681 { 3682 return __check_ptr_off_reg(env, reg, regno, false); 3683 } 3684 3685 static int map_kptr_match_type(struct bpf_verifier_env *env, 3686 struct bpf_map_value_off_desc *off_desc, 3687 struct bpf_reg_state *reg, u32 regno) 3688 { 3689 const char *targ_name = kernel_type_name(off_desc->kptr.btf, off_desc->kptr.btf_id); 3690 int perm_flags = PTR_MAYBE_NULL; 3691 const char *reg_name = ""; 3692 3693 /* Only unreferenced case accepts untrusted pointers */ 3694 if (off_desc->type == BPF_KPTR_UNREF) 3695 perm_flags |= PTR_UNTRUSTED; 3696 3697 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) 3698 goto bad_type; 3699 3700 if (!btf_is_kernel(reg->btf)) { 3701 verbose(env, "R%d must point to kernel BTF\n", regno); 3702 return -EINVAL; 3703 } 3704 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ 3705 reg_name = kernel_type_name(reg->btf, reg->btf_id); 3706 3707 /* For ref_ptr case, release function check should ensure we get one 3708 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the 3709 * normal store of unreferenced kptr, we must ensure var_off is zero. 
3710 * Since ref_ptr cannot be accessed directly by BPF insns, checks for 3711 * reg->off and reg->ref_obj_id are not needed here. 3712 */ 3713 if (__check_ptr_off_reg(env, reg, regno, true)) 3714 return -EACCES; 3715 3716 /* A full type match is needed, as BTF can be vmlinux or module BTF, and 3717 * we also need to take into account the reg->off. 3718 * 3719 * We want to support cases like: 3720 * 3721 * struct foo { 3722 * struct bar br; 3723 * struct baz bz; 3724 * }; 3725 * 3726 * struct foo *v; 3727 * v = func(); // PTR_TO_BTF_ID 3728 * val->foo = v; // reg->off is zero, btf and btf_id match type 3729 * val->bar = &v->br; // reg->off is still zero, but we need to retry with 3730 * // first member type of struct after comparison fails 3731 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked 3732 * // to match type 3733 * 3734 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off 3735 * is zero. We must also ensure that btf_struct_ids_match does not walk 3736 * the struct to match type against first member of struct, i.e. reject 3737 * second case from above. Hence, when type is BPF_KPTR_REF, we set 3738 * strict mode to true for type match. 3739 */ 3740 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 3741 off_desc->kptr.btf, off_desc->kptr.btf_id, 3742 off_desc->type == BPF_KPTR_REF)) 3743 goto bad_type; 3744 return 0; 3745 bad_type: 3746 verbose(env, "invalid kptr access, R%d type=%s%s ", regno, 3747 reg_type_str(env, reg->type), reg_name); 3748 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name); 3749 if (off_desc->type == BPF_KPTR_UNREF) 3750 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED), 3751 targ_name); 3752 else 3753 verbose(env, "\n"); 3754 return -EINVAL; 3755 } 3756 3757 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, 3758 int value_regno, int insn_idx, 3759 struct bpf_map_value_off_desc *off_desc) 3760 { 3761 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 3762 int class = BPF_CLASS(insn->code); 3763 struct bpf_reg_state *val_reg; 3764 3765 /* Things we already checked for in check_map_access and caller: 3766 * - Reject cases where variable offset may touch kptr 3767 * - size of access (must be BPF_DW) 3768 * - tnum_is_const(reg->var_off) 3769 * - off_desc->offset == off + reg->var_off.value 3770 */ 3771 /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */ 3772 if (BPF_MODE(insn->code) != BPF_MEM) { 3773 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n"); 3774 return -EACCES; 3775 } 3776 3777 /* We only allow loading referenced kptr, since it will be marked as 3778 * untrusted, similar to unreferenced kptr. 3779 */ 3780 if (class != BPF_LDX && off_desc->type == BPF_KPTR_REF) { 3781 verbose(env, "store to referenced kptr disallowed\n"); 3782 return -EACCES; 3783 } 3784 3785 if (class == BPF_LDX) { 3786 val_reg = reg_state(env, value_regno); 3787 /* We can simply mark the value_regno receiving the pointer 3788 * value from map as PTR_TO_BTF_ID, with the correct type. 
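 * Roughly (sketch): after
 *   v = val->kptr;
 * the destination register is PTR_TO_BTF_ID | PTR_MAYBE_NULL | PTR_UNTRUSTED,
 * so the program must NULL-check v before dereferencing it.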
3789 */ 3790 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->kptr.btf, 3791 off_desc->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED); 3792 /* For mark_ptr_or_null_reg */ 3793 val_reg->id = ++env->id_gen; 3794 } else if (class == BPF_STX) { 3795 val_reg = reg_state(env, value_regno); 3796 if (!register_is_null(val_reg) && 3797 map_kptr_match_type(env, off_desc, val_reg, value_regno)) 3798 return -EACCES; 3799 } else if (class == BPF_ST) { 3800 if (insn->imm) { 3801 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n", 3802 off_desc->offset); 3803 return -EACCES; 3804 } 3805 } else { 3806 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n"); 3807 return -EACCES; 3808 } 3809 return 0; 3810 } 3811 3812 /* check read/write into a map element with possible variable offset */ 3813 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 3814 int off, int size, bool zero_size_allowed, 3815 enum bpf_access_src src) 3816 { 3817 struct bpf_verifier_state *vstate = env->cur_state; 3818 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3819 struct bpf_reg_state *reg = &state->regs[regno]; 3820 struct bpf_map *map = reg->map_ptr; 3821 int err; 3822 3823 err = check_mem_region_access(env, regno, off, size, map->value_size, 3824 zero_size_allowed); 3825 if (err) 3826 return err; 3827 3828 if (map_value_has_spin_lock(map)) { 3829 u32 lock = map->spin_lock_off; 3830 3831 /* if any part of struct bpf_spin_lock can be touched by 3832 * load/store reject this program. 3833 * To check that [x1, x2) overlaps with [y1, y2) 3834 * it is sufficient to check x1 < y2 && y1 < x2. 3835 */ 3836 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 3837 lock < reg->umax_value + off + size) { 3838 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 3839 return -EACCES; 3840 } 3841 } 3842 if (map_value_has_timer(map)) { 3843 u32 t = map->timer_off; 3844 3845 if (reg->smin_value + off < t + sizeof(struct bpf_timer) && 3846 t < reg->umax_value + off + size) { 3847 verbose(env, "bpf_timer cannot be accessed directly by load/store\n"); 3848 return -EACCES; 3849 } 3850 } 3851 if (map_value_has_kptrs(map)) { 3852 struct bpf_map_value_off *tab = map->kptr_off_tab; 3853 int i; 3854 3855 for (i = 0; i < tab->nr_off; i++) { 3856 u32 p = tab->off[i].offset; 3857 3858 if (reg->smin_value + off < p + sizeof(u64) && 3859 p < reg->umax_value + off + size) { 3860 if (src != ACCESS_DIRECT) { 3861 verbose(env, "kptr cannot be accessed indirectly by helper\n"); 3862 return -EACCES; 3863 } 3864 if (!tnum_is_const(reg->var_off)) { 3865 verbose(env, "kptr access cannot have variable offset\n"); 3866 return -EACCES; 3867 } 3868 if (p != off + reg->var_off.value) { 3869 verbose(env, "kptr access misaligned expected=%u off=%llu\n", 3870 p, off + reg->var_off.value); 3871 return -EACCES; 3872 } 3873 if (size != bpf_size_to_bytes(BPF_DW)) { 3874 verbose(env, "kptr access size must be BPF_DW\n"); 3875 return -EACCES; 3876 } 3877 break; 3878 } 3879 } 3880 } 3881 return err; 3882 } 3883 3884 #define MAX_PACKET_OFF 0xffff 3885 3886 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 3887 const struct bpf_call_arg_meta *meta, 3888 enum bpf_access_type t) 3889 { 3890 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 3891 3892 switch (prog_type) { 3893 /* Program types only with direct read access go here! 
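 * (For these, e.g. a cgroup/skb program, a BPF_WRITE through a packet pointer
 *  hits the check just below, the function returns false and the caller
 *  rejects the store.)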
 */
3894	case BPF_PROG_TYPE_LWT_IN:
3895	case BPF_PROG_TYPE_LWT_OUT:
3896	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3897	case BPF_PROG_TYPE_SK_REUSEPORT:
3898	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3899	case BPF_PROG_TYPE_CGROUP_SKB:
3900		if (t == BPF_WRITE)
3901			return false;
3902		fallthrough;
3903
3904	/* Program types with direct read + write access go here! */
3905	case BPF_PROG_TYPE_SCHED_CLS:
3906	case BPF_PROG_TYPE_SCHED_ACT:
3907	case BPF_PROG_TYPE_XDP:
3908	case BPF_PROG_TYPE_LWT_XMIT:
3909	case BPF_PROG_TYPE_SK_SKB:
3910	case BPF_PROG_TYPE_SK_MSG:
3911		if (meta)
3912			return meta->pkt_access;
3913
3914		env->seen_direct_write = true;
3915		return true;
3916
3917	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3918		if (t == BPF_WRITE)
3919			env->seen_direct_write = true;
3920
3921		return true;
3922
3923	default:
3924		return false;
3925	}
3926 }
3927
3928 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
3929			       int size, bool zero_size_allowed)
3930 {
3931	struct bpf_reg_state *regs = cur_regs(env);
3932	struct bpf_reg_state *reg = &regs[regno];
3933	int err;
3934
3935	/* We may have added a variable offset to the packet pointer; but any
3936	 * reg->range we have comes after that. We are only checking the fixed
3937	 * offset.
3938	 */
3939
3940	/* We don't allow negative numbers, because we aren't tracking enough
3941	 * detail to prove they're safe.
3942	 */
3943	if (reg->smin_value < 0) {
3944		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3945			regno);
3946		return -EACCES;
3947	}
3948
3949	err = reg->range < 0 ? -EINVAL :
3950	      __check_mem_access(env, regno, off, size, reg->range,
3951				 zero_size_allowed);
3952	if (err) {
3953		verbose(env, "R%d offset is outside of the packet\n", regno);
3954		return err;
3955	}
3956
3957	/* __check_mem_access has made sure "off + size - 1" is within u16.
3958	 * reg->umax_value can't be bigger than MAX_PACKET_OFF, which is 0xffff;
3959	 * otherwise find_good_pkt_pointers would have refused to set the range
3960	 * and __check_mem_access above would have rejected this pkt access.
3961	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3962	 */
3963	env->prog->aux->max_pkt_offset =
3964		max_t(u32, env->prog->aux->max_pkt_offset,
3965		      off + reg->umax_value + size - 1);
3966
3967	return err;
3968 }
3969
3970 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
3971 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
3972			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
3973			    struct btf **btf, u32 *btf_id)
3974 {
3975	struct bpf_insn_access_aux info = {
3976		.reg_type = *reg_type,
3977		.log = &env->log,
3978	};
3979
3980	if (env->ops->is_valid_access &&
3981	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
3982		/* A non zero info.ctx_field_size indicates that this field is a
3983		 * candidate for later verifier transformation to load the whole
3984		 * field and then apply a mask when accessed with a narrower
3985		 * access than the actual ctx access size. A zero info.ctx_field_size
3986		 * will only allow for whole field access and rejects any other
3987		 * type of narrower access.
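 *
 * For instance (a rough sketch of the later rewrite, not the exact code):
 * a 1-byte read of a 4-byte context field reported with
 * info.ctx_field_size == 4 may end up as a full load plus a mask,
 * conceptually
 *   r0 = *(u32 *)(r1 + off);
 *   r0 &= 0xff;
 * whereas a field reported with ctx_field_size == 0 only admits the
 * full-width access.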
3988 */ 3989 *reg_type = info.reg_type; 3990 3991 if (base_type(*reg_type) == PTR_TO_BTF_ID) { 3992 *btf = info.btf; 3993 *btf_id = info.btf_id; 3994 } else { 3995 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 3996 } 3997 /* remember the offset of last byte accessed in ctx */ 3998 if (env->prog->aux->max_ctx_offset < off + size) 3999 env->prog->aux->max_ctx_offset = off + size; 4000 return 0; 4001 } 4002 4003 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 4004 return -EACCES; 4005 } 4006 4007 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 4008 int size) 4009 { 4010 if (size < 0 || off < 0 || 4011 (u64)off + size > sizeof(struct bpf_flow_keys)) { 4012 verbose(env, "invalid access to flow keys off=%d size=%d\n", 4013 off, size); 4014 return -EACCES; 4015 } 4016 return 0; 4017 } 4018 4019 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 4020 u32 regno, int off, int size, 4021 enum bpf_access_type t) 4022 { 4023 struct bpf_reg_state *regs = cur_regs(env); 4024 struct bpf_reg_state *reg = ®s[regno]; 4025 struct bpf_insn_access_aux info = {}; 4026 bool valid; 4027 4028 if (reg->smin_value < 0) { 4029 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 4030 regno); 4031 return -EACCES; 4032 } 4033 4034 switch (reg->type) { 4035 case PTR_TO_SOCK_COMMON: 4036 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 4037 break; 4038 case PTR_TO_SOCKET: 4039 valid = bpf_sock_is_valid_access(off, size, t, &info); 4040 break; 4041 case PTR_TO_TCP_SOCK: 4042 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 4043 break; 4044 case PTR_TO_XDP_SOCK: 4045 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 4046 break; 4047 default: 4048 valid = false; 4049 } 4050 4051 4052 if (valid) { 4053 env->insn_aux_data[insn_idx].ctx_field_size = 4054 info.ctx_field_size; 4055 return 0; 4056 } 4057 4058 verbose(env, "R%d invalid %s access off=%d size=%d\n", 4059 regno, reg_type_str(env, reg->type), off, size); 4060 4061 return -EACCES; 4062 } 4063 4064 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 4065 { 4066 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 4067 } 4068 4069 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 4070 { 4071 const struct bpf_reg_state *reg = reg_state(env, regno); 4072 4073 return reg->type == PTR_TO_CTX; 4074 } 4075 4076 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 4077 { 4078 const struct bpf_reg_state *reg = reg_state(env, regno); 4079 4080 return type_is_sk_pointer(reg->type); 4081 } 4082 4083 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 4084 { 4085 const struct bpf_reg_state *reg = reg_state(env, regno); 4086 4087 return type_is_pkt_pointer(reg->type); 4088 } 4089 4090 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 4091 { 4092 const struct bpf_reg_state *reg = reg_state(env, regno); 4093 4094 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ 4095 return reg->type == PTR_TO_FLOW_KEYS; 4096 } 4097 4098 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 4099 const struct bpf_reg_state *reg, 4100 int off, int size, bool strict) 4101 { 4102 struct tnum reg_off; 4103 int ip_align; 4104 4105 /* Byte size accesses are always allowed. 
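 * For a wider access, e.g. a 4-byte load with a known-zero var_off, the
 * strict check below requires (2 + reg->off + off) to be a multiple of 4,
 * the 2 being the emulated NET_IP_ALIGN.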
*/ 4106 if (!strict || size == 1) 4107 return 0; 4108 4109 /* For platforms that do not have a Kconfig enabling 4110 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 4111 * NET_IP_ALIGN is universally set to '2'. And on platforms 4112 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 4113 * to this code only in strict mode where we want to emulate 4114 * the NET_IP_ALIGN==2 checking. Therefore use an 4115 * unconditional IP align value of '2'. 4116 */ 4117 ip_align = 2; 4118 4119 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 4120 if (!tnum_is_aligned(reg_off, size)) { 4121 char tn_buf[48]; 4122 4123 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4124 verbose(env, 4125 "misaligned packet access off %d+%s+%d+%d size %d\n", 4126 ip_align, tn_buf, reg->off, off, size); 4127 return -EACCES; 4128 } 4129 4130 return 0; 4131 } 4132 4133 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 4134 const struct bpf_reg_state *reg, 4135 const char *pointer_desc, 4136 int off, int size, bool strict) 4137 { 4138 struct tnum reg_off; 4139 4140 /* Byte size accesses are always allowed. */ 4141 if (!strict || size == 1) 4142 return 0; 4143 4144 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 4145 if (!tnum_is_aligned(reg_off, size)) { 4146 char tn_buf[48]; 4147 4148 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4149 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 4150 pointer_desc, tn_buf, reg->off, off, size); 4151 return -EACCES; 4152 } 4153 4154 return 0; 4155 } 4156 4157 static int check_ptr_alignment(struct bpf_verifier_env *env, 4158 const struct bpf_reg_state *reg, int off, 4159 int size, bool strict_alignment_once) 4160 { 4161 bool strict = env->strict_alignment || strict_alignment_once; 4162 const char *pointer_desc = ""; 4163 4164 switch (reg->type) { 4165 case PTR_TO_PACKET: 4166 case PTR_TO_PACKET_META: 4167 /* Special case, because of NET_IP_ALIGN. Given metadata sits 4168 * right in front, treat it the very same way. 4169 */ 4170 return check_pkt_ptr_alignment(env, reg, off, size, strict); 4171 case PTR_TO_FLOW_KEYS: 4172 pointer_desc = "flow keys "; 4173 break; 4174 case PTR_TO_MAP_KEY: 4175 pointer_desc = "key "; 4176 break; 4177 case PTR_TO_MAP_VALUE: 4178 pointer_desc = "value "; 4179 break; 4180 case PTR_TO_CTX: 4181 pointer_desc = "context "; 4182 break; 4183 case PTR_TO_STACK: 4184 pointer_desc = "stack "; 4185 /* The stack spill tracking logic in check_stack_write_fixed_off() 4186 * and check_stack_read_fixed_off() relies on stack accesses being 4187 * aligned. 
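 * e.g. an 8-byte spill to r10-8 is fine, while a store like
 *   *(u64 *)(r10 - 12) = r1
 * is rejected as misaligned here even when strict_alignment is not set,
 * because stack pointers always use the strict check.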
4188 */ 4189 strict = true; 4190 break; 4191 case PTR_TO_SOCKET: 4192 pointer_desc = "sock "; 4193 break; 4194 case PTR_TO_SOCK_COMMON: 4195 pointer_desc = "sock_common "; 4196 break; 4197 case PTR_TO_TCP_SOCK: 4198 pointer_desc = "tcp_sock "; 4199 break; 4200 case PTR_TO_XDP_SOCK: 4201 pointer_desc = "xdp_sock "; 4202 break; 4203 default: 4204 break; 4205 } 4206 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 4207 strict); 4208 } 4209 4210 static int update_stack_depth(struct bpf_verifier_env *env, 4211 const struct bpf_func_state *func, 4212 int off) 4213 { 4214 u16 stack = env->subprog_info[func->subprogno].stack_depth; 4215 4216 if (stack >= -off) 4217 return 0; 4218 4219 /* update known max for given subprogram */ 4220 env->subprog_info[func->subprogno].stack_depth = -off; 4221 return 0; 4222 } 4223 4224 /* starting from main bpf function walk all instructions of the function 4225 * and recursively walk all callees that given function can call. 4226 * Ignore jump and exit insns. 4227 * Since recursion is prevented by check_cfg() this algorithm 4228 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 4229 */ 4230 static int check_max_stack_depth(struct bpf_verifier_env *env) 4231 { 4232 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 4233 struct bpf_subprog_info *subprog = env->subprog_info; 4234 struct bpf_insn *insn = env->prog->insnsi; 4235 bool tail_call_reachable = false; 4236 int ret_insn[MAX_CALL_FRAMES]; 4237 int ret_prog[MAX_CALL_FRAMES]; 4238 int j; 4239 4240 process_func: 4241 /* protect against potential stack overflow that might happen when 4242 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 4243 * depth for such case down to 256 so that the worst case scenario 4244 * would result in 8k stack size (32 which is tailcall limit * 256 = 4245 * 8k). 4246 * 4247 * To get the idea what might happen, see an example: 4248 * func1 -> sub rsp, 128 4249 * subfunc1 -> sub rsp, 256 4250 * tailcall1 -> add rsp, 256 4251 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 4252 * subfunc2 -> sub rsp, 64 4253 * subfunc22 -> sub rsp, 128 4254 * tailcall2 -> add rsp, 128 4255 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 4256 * 4257 * tailcall will unwind the current stack frame but it will not get rid 4258 * of caller's stack as shown on the example above. 4259 */ 4260 if (idx && subprog[idx].has_tail_call && depth >= 256) { 4261 verbose(env, 4262 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 4263 depth); 4264 return -EACCES; 4265 } 4266 /* round up to 32-bytes, since this is granularity 4267 * of interpreter stack size 4268 */ 4269 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 4270 if (depth > MAX_BPF_STACK) { 4271 verbose(env, "combined stack size of %d calls is %d. Too large\n", 4272 frame + 1, depth); 4273 return -EACCES; 4274 } 4275 continue_func: 4276 subprog_end = subprog[idx + 1].start; 4277 for (; i < subprog_end; i++) { 4278 int next_insn; 4279 4280 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) 4281 continue; 4282 /* remember insn and function to return to */ 4283 ret_insn[frame] = i + 1; 4284 ret_prog[frame] = idx; 4285 4286 /* find the callee */ 4287 next_insn = i + insn[i].imm + 1; 4288 idx = find_subprog(env, next_insn); 4289 if (idx < 0) { 4290 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", 4291 next_insn); 4292 return -EFAULT; 4293 } 4294 if (subprog[idx].is_async_cb) { 4295 if (subprog[idx].has_tail_call) { 4296 verbose(env, "verifier bug. subprog has tail_call and async cb\n"); 4297 return -EFAULT; 4298 } 4299 /* async callbacks don't increase bpf prog stack size */ 4300 continue; 4301 } 4302 i = next_insn; 4303 4304 if (subprog[idx].has_tail_call) 4305 tail_call_reachable = true; 4306 4307 frame++; 4308 if (frame >= MAX_CALL_FRAMES) { 4309 verbose(env, "the call stack of %d frames is too deep !\n", 4310 frame); 4311 return -E2BIG; 4312 } 4313 goto process_func; 4314 } 4315 /* if tail call got detected across bpf2bpf calls then mark each of the 4316 * currently present subprog frames as tail call reachable subprogs; 4317 * this info will be utilized by JIT so that we will be preserving the 4318 * tail call counter throughout bpf2bpf calls combined with tailcalls 4319 */ 4320 if (tail_call_reachable) 4321 for (j = 0; j < frame; j++) 4322 subprog[ret_prog[j]].tail_call_reachable = true; 4323 if (subprog[0].tail_call_reachable) 4324 env->prog->aux->tail_call_reachable = true; 4325 4326 /* end of for() loop means the last insn of the 'subprog' 4327 * was reached. Doesn't matter whether it was JA or EXIT 4328 */ 4329 if (frame == 0) 4330 return 0; 4331 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 4332 frame--; 4333 i = ret_insn[frame]; 4334 idx = ret_prog[frame]; 4335 goto continue_func; 4336 } 4337 4338 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 4339 static int get_callee_stack_depth(struct bpf_verifier_env *env, 4340 const struct bpf_insn *insn, int idx) 4341 { 4342 int start = idx + insn->imm + 1, subprog; 4343 4344 subprog = find_subprog(env, start); 4345 if (subprog < 0) { 4346 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 4347 start); 4348 return -EFAULT; 4349 } 4350 return env->subprog_info[subprog].stack_depth; 4351 } 4352 #endif 4353 4354 static int __check_buffer_access(struct bpf_verifier_env *env, 4355 const char *buf_info, 4356 const struct bpf_reg_state *reg, 4357 int regno, int off, int size) 4358 { 4359 if (off < 0) { 4360 verbose(env, 4361 "R%d invalid %s buffer access: off=%d, size=%d\n", 4362 regno, buf_info, off, size); 4363 return -EACCES; 4364 } 4365 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 4366 char tn_buf[48]; 4367 4368 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4369 verbose(env, 4370 "R%d invalid variable buffer offset: off=%d, var_off=%s\n", 4371 regno, off, tn_buf); 4372 return -EACCES; 4373 } 4374 4375 return 0; 4376 } 4377 4378 static int check_tp_buffer_access(struct bpf_verifier_env *env, 4379 const struct bpf_reg_state *reg, 4380 int regno, int off, int size) 4381 { 4382 int err; 4383 4384 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); 4385 if (err) 4386 return err; 4387 4388 if (off + size > env->prog->aux->max_tp_access) 4389 env->prog->aux->max_tp_access = off + size; 4390 4391 return 0; 4392 } 4393 4394 static int check_buffer_access(struct bpf_verifier_env *env, 4395 const struct bpf_reg_state *reg, 4396 int regno, int off, int size, 4397 bool zero_size_allowed, 4398 u32 *max_access) 4399 { 4400 const char *buf_info = type_is_rdonly_mem(reg->type) ? 
"rdonly" : "rdwr"; 4401 int err; 4402 4403 err = __check_buffer_access(env, buf_info, reg, regno, off, size); 4404 if (err) 4405 return err; 4406 4407 if (off + size > *max_access) 4408 *max_access = off + size; 4409 4410 return 0; 4411 } 4412 4413 /* BPF architecture zero extends alu32 ops into 64-bit registesr */ 4414 static void zext_32_to_64(struct bpf_reg_state *reg) 4415 { 4416 reg->var_off = tnum_subreg(reg->var_off); 4417 __reg_assign_32_into_64(reg); 4418 } 4419 4420 /* truncate register to smaller size (in bytes) 4421 * must be called with size < BPF_REG_SIZE 4422 */ 4423 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 4424 { 4425 u64 mask; 4426 4427 /* clear high bits in bit representation */ 4428 reg->var_off = tnum_cast(reg->var_off, size); 4429 4430 /* fix arithmetic bounds */ 4431 mask = ((u64)1 << (size * 8)) - 1; 4432 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 4433 reg->umin_value &= mask; 4434 reg->umax_value &= mask; 4435 } else { 4436 reg->umin_value = 0; 4437 reg->umax_value = mask; 4438 } 4439 reg->smin_value = reg->umin_value; 4440 reg->smax_value = reg->umax_value; 4441 4442 /* If size is smaller than 32bit register the 32bit register 4443 * values are also truncated so we push 64-bit bounds into 4444 * 32-bit bounds. Above were truncated < 32-bits already. 4445 */ 4446 if (size >= 4) 4447 return; 4448 __reg_combine_64_into_32(reg); 4449 } 4450 4451 static bool bpf_map_is_rdonly(const struct bpf_map *map) 4452 { 4453 /* A map is considered read-only if the following condition are true: 4454 * 4455 * 1) BPF program side cannot change any of the map content. The 4456 * BPF_F_RDONLY_PROG flag is throughout the lifetime of a map 4457 * and was set at map creation time. 4458 * 2) The map value(s) have been initialized from user space by a 4459 * loader and then "frozen", such that no new map update/delete 4460 * operations from syscall side are possible for the rest of 4461 * the map's lifetime from that point onwards. 4462 * 3) Any parallel/pending map update/delete operations from syscall 4463 * side have been completed. Only after that point, it's safe to 4464 * assume that map value(s) are immutable. 
4465 */ 4466 return (map->map_flags & BPF_F_RDONLY_PROG) && 4467 READ_ONCE(map->frozen) && 4468 !bpf_map_write_active(map); 4469 } 4470 4471 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 4472 { 4473 void *ptr; 4474 u64 addr; 4475 int err; 4476 4477 err = map->ops->map_direct_value_addr(map, &addr, off); 4478 if (err) 4479 return err; 4480 ptr = (void *)(long)addr + off; 4481 4482 switch (size) { 4483 case sizeof(u8): 4484 *val = (u64)*(u8 *)ptr; 4485 break; 4486 case sizeof(u16): 4487 *val = (u64)*(u16 *)ptr; 4488 break; 4489 case sizeof(u32): 4490 *val = (u64)*(u32 *)ptr; 4491 break; 4492 case sizeof(u64): 4493 *val = *(u64 *)ptr; 4494 break; 4495 default: 4496 return -EINVAL; 4497 } 4498 return 0; 4499 } 4500 4501 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 4502 struct bpf_reg_state *regs, 4503 int regno, int off, int size, 4504 enum bpf_access_type atype, 4505 int value_regno) 4506 { 4507 struct bpf_reg_state *reg = regs + regno; 4508 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 4509 const char *tname = btf_name_by_offset(reg->btf, t->name_off); 4510 enum bpf_type_flag flag = 0; 4511 u32 btf_id; 4512 int ret; 4513 4514 if (off < 0) { 4515 verbose(env, 4516 "R%d is ptr_%s invalid negative access: off=%d\n", 4517 regno, tname, off); 4518 return -EACCES; 4519 } 4520 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 4521 char tn_buf[48]; 4522 4523 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4524 verbose(env, 4525 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 4526 regno, tname, off, tn_buf); 4527 return -EACCES; 4528 } 4529 4530 if (reg->type & MEM_USER) { 4531 verbose(env, 4532 "R%d is ptr_%s access user memory: off=%d\n", 4533 regno, tname, off); 4534 return -EACCES; 4535 } 4536 4537 if (reg->type & MEM_PERCPU) { 4538 verbose(env, 4539 "R%d is ptr_%s access percpu memory: off=%d\n", 4540 regno, tname, off); 4541 return -EACCES; 4542 } 4543 4544 if (env->ops->btf_struct_access) { 4545 ret = env->ops->btf_struct_access(&env->log, reg->btf, t, 4546 off, size, atype, &btf_id, &flag); 4547 } else { 4548 if (atype != BPF_READ) { 4549 verbose(env, "only read is supported\n"); 4550 return -EACCES; 4551 } 4552 4553 ret = btf_struct_access(&env->log, reg->btf, t, off, size, 4554 atype, &btf_id, &flag); 4555 } 4556 4557 if (ret < 0) 4558 return ret; 4559 4560 /* If this is an untrusted pointer, all pointers formed by walking it 4561 * also inherit the untrusted flag. 
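 * For example, if the register is an untrusted PTR_TO_BTF_ID to
 * struct task_struct, then a load like r2 = r1->mm yields a PTR_TO_BTF_ID
 * for struct mm_struct that carries PTR_UNTRUSTED as well.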
4562 */ 4563 if (type_flag(reg->type) & PTR_UNTRUSTED) 4564 flag |= PTR_UNTRUSTED; 4565 4566 if (atype == BPF_READ && value_regno >= 0) 4567 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); 4568 4569 return 0; 4570 } 4571 4572 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 4573 struct bpf_reg_state *regs, 4574 int regno, int off, int size, 4575 enum bpf_access_type atype, 4576 int value_regno) 4577 { 4578 struct bpf_reg_state *reg = regs + regno; 4579 struct bpf_map *map = reg->map_ptr; 4580 enum bpf_type_flag flag = 0; 4581 const struct btf_type *t; 4582 const char *tname; 4583 u32 btf_id; 4584 int ret; 4585 4586 if (!btf_vmlinux) { 4587 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 4588 return -ENOTSUPP; 4589 } 4590 4591 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 4592 verbose(env, "map_ptr access not supported for map type %d\n", 4593 map->map_type); 4594 return -ENOTSUPP; 4595 } 4596 4597 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 4598 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 4599 4600 if (!env->allow_ptr_to_map_access) { 4601 verbose(env, 4602 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 4603 tname); 4604 return -EPERM; 4605 } 4606 4607 if (off < 0) { 4608 verbose(env, "R%d is %s invalid negative access: off=%d\n", 4609 regno, tname, off); 4610 return -EACCES; 4611 } 4612 4613 if (atype != BPF_READ) { 4614 verbose(env, "only read from %s is supported\n", tname); 4615 return -EACCES; 4616 } 4617 4618 ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag); 4619 if (ret < 0) 4620 return ret; 4621 4622 if (value_regno >= 0) 4623 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); 4624 4625 return 0; 4626 } 4627 4628 /* Check that the stack access at the given offset is within bounds. The 4629 * maximum valid offset is -1. 4630 * 4631 * The minimum valid offset is -MAX_BPF_STACK for writes, and 4632 * -state->allocated_stack for reads. 4633 */ 4634 static int check_stack_slot_within_bounds(int off, 4635 struct bpf_func_state *state, 4636 enum bpf_access_type t) 4637 { 4638 int min_valid_off; 4639 4640 if (t == BPF_WRITE) 4641 min_valid_off = -MAX_BPF_STACK; 4642 else 4643 min_valid_off = -state->allocated_stack; 4644 4645 if (off < min_valid_off || off > -1) 4646 return -EACCES; 4647 return 0; 4648 } 4649 4650 /* Check that the stack access at 'regno + off' falls within the maximum stack 4651 * bounds. 4652 * 4653 * 'off' includes `regno->offset`, but not its dynamic part (if any). 4654 */ 4655 static int check_stack_access_within_bounds( 4656 struct bpf_verifier_env *env, 4657 int regno, int off, int access_size, 4658 enum bpf_access_src src, enum bpf_access_type type) 4659 { 4660 struct bpf_reg_state *regs = cur_regs(env); 4661 struct bpf_reg_state *reg = regs + regno; 4662 struct bpf_func_state *state = func(env, reg); 4663 int min_off, max_off; 4664 int err; 4665 char *err_extra; 4666 4667 if (src == ACCESS_HELPER) 4668 /* We don't know if helpers are reading or writing (or both). 
*/ 4669 err_extra = " indirect access to"; 4670 else if (type == BPF_READ) 4671 err_extra = " read from"; 4672 else 4673 err_extra = " write to"; 4674 4675 if (tnum_is_const(reg->var_off)) { 4676 min_off = reg->var_off.value + off; 4677 if (access_size > 0) 4678 max_off = min_off + access_size - 1; 4679 else 4680 max_off = min_off; 4681 } else { 4682 if (reg->smax_value >= BPF_MAX_VAR_OFF || 4683 reg->smin_value <= -BPF_MAX_VAR_OFF) { 4684 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", 4685 err_extra, regno); 4686 return -EACCES; 4687 } 4688 min_off = reg->smin_value + off; 4689 if (access_size > 0) 4690 max_off = reg->smax_value + off + access_size - 1; 4691 else 4692 max_off = min_off; 4693 } 4694 4695 err = check_stack_slot_within_bounds(min_off, state, type); 4696 if (!err) 4697 err = check_stack_slot_within_bounds(max_off, state, type); 4698 4699 if (err) { 4700 if (tnum_is_const(reg->var_off)) { 4701 verbose(env, "invalid%s stack R%d off=%d size=%d\n", 4702 err_extra, regno, off, access_size); 4703 } else { 4704 char tn_buf[48]; 4705 4706 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4707 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", 4708 err_extra, regno, tn_buf, access_size); 4709 } 4710 } 4711 return err; 4712 } 4713 4714 /* check whether memory at (regno + off) is accessible for t = (read | write) 4715 * if t==write, value_regno is a register which value is stored into memory 4716 * if t==read, value_regno is a register which will receive the value from memory 4717 * if t==write && value_regno==-1, some unknown value is stored into memory 4718 * if t==read && value_regno==-1, don't care what we read from memory 4719 */ 4720 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 4721 int off, int bpf_size, enum bpf_access_type t, 4722 int value_regno, bool strict_alignment_once) 4723 { 4724 struct bpf_reg_state *regs = cur_regs(env); 4725 struct bpf_reg_state *reg = regs + regno; 4726 struct bpf_func_state *state; 4727 int size, err = 0; 4728 4729 size = bpf_size_to_bytes(bpf_size); 4730 if (size < 0) 4731 return size; 4732 4733 /* alignment checks will add in reg->off themselves */ 4734 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 4735 if (err) 4736 return err; 4737 4738 /* for access checks, reg->off is just part of off */ 4739 off += reg->off; 4740 4741 if (reg->type == PTR_TO_MAP_KEY) { 4742 if (t == BPF_WRITE) { 4743 verbose(env, "write to change key R%d not allowed\n", regno); 4744 return -EACCES; 4745 } 4746 4747 err = check_mem_region_access(env, regno, off, size, 4748 reg->map_ptr->key_size, false); 4749 if (err) 4750 return err; 4751 if (value_regno >= 0) 4752 mark_reg_unknown(env, regs, value_regno); 4753 } else if (reg->type == PTR_TO_MAP_VALUE) { 4754 struct bpf_map_value_off_desc *kptr_off_desc = NULL; 4755 4756 if (t == BPF_WRITE && value_regno >= 0 && 4757 is_pointer_value(env, value_regno)) { 4758 verbose(env, "R%d leaks addr into map\n", value_regno); 4759 return -EACCES; 4760 } 4761 err = check_map_access_type(env, regno, off, size, t); 4762 if (err) 4763 return err; 4764 err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT); 4765 if (err) 4766 return err; 4767 if (tnum_is_const(reg->var_off)) 4768 kptr_off_desc = bpf_map_kptr_off_contains(reg->map_ptr, 4769 off + reg->var_off.value); 4770 if (kptr_off_desc) { 4771 err = check_map_kptr_access(env, regno, value_regno, insn_idx, 4772 kptr_off_desc); 4773 } else if (t == BPF_READ && value_regno >= 0) { 
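		/* A common case that benefits here (sketch): loads from a frozen,
		 * read-only global data section (.rodata), which libbpf sets up as
		 * a BPF_F_RDONLY_PROG array map, are folded into known scalars
		 * below instead of staying unknown.
		 */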
	struct bpf_map *map = reg->map_ptr;
4775
4776		/* if map is read-only, track its contents as scalars */
4777		if (tnum_is_const(reg->var_off) &&
4778		    bpf_map_is_rdonly(map) &&
4779		    map->ops->map_direct_value_addr) {
4780			int map_off = off + reg->var_off.value;
4781			u64 val = 0;
4782
4783			err = bpf_map_direct_read(map, map_off, size,
4784						  &val);
4785			if (err)
4786				return err;
4787
4788			regs[value_regno].type = SCALAR_VALUE;
4789			__mark_reg_known(&regs[value_regno], val);
4790		} else {
4791			mark_reg_unknown(env, regs, value_regno);
4792		}
4793	}
4794 } else if (base_type(reg->type) == PTR_TO_MEM) {
4795	bool rdonly_mem = type_is_rdonly_mem(reg->type);
4796
4797	if (type_may_be_null(reg->type)) {
4798		verbose(env, "R%d invalid mem access '%s'\n", regno,
4799			reg_type_str(env, reg->type));
4800		return -EACCES;
4801	}
4802
4803	if (t == BPF_WRITE && rdonly_mem) {
4804		verbose(env, "R%d cannot write into %s\n",
4805			regno, reg_type_str(env, reg->type));
4806		return -EACCES;
4807	}
4808
4809	if (t == BPF_WRITE && value_regno >= 0 &&
4810	    is_pointer_value(env, value_regno)) {
4811		verbose(env, "R%d leaks addr into mem\n", value_regno);
4812		return -EACCES;
4813	}
4814
4815	err = check_mem_region_access(env, regno, off, size,
4816				      reg->mem_size, false);
4817	if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
4818		mark_reg_unknown(env, regs, value_regno);
4819 } else if (reg->type == PTR_TO_CTX) {
4820	enum bpf_reg_type reg_type = SCALAR_VALUE;
4821	struct btf *btf = NULL;
4822	u32 btf_id = 0;
4823
4824	if (t == BPF_WRITE && value_regno >= 0 &&
4825	    is_pointer_value(env, value_regno)) {
4826		verbose(env, "R%d leaks addr into ctx\n", value_regno);
4827		return -EACCES;
4828	}
4829
4830	err = check_ptr_off_reg(env, reg, regno);
4831	if (err < 0)
4832		return err;
4833
4834	err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
4835			       &btf_id);
4836	if (err)
4837		verbose_linfo(env, insn_idx, "; ");
4838	if (!err && t == BPF_READ && value_regno >= 0) {
4839		/* ctx access returns either a scalar, or a
4840		 * PTR_TO_PACKET[_META,_END]. In the latter
4841		 * case, we know the offset is zero.
4842		 */
4843		if (reg_type == SCALAR_VALUE) {
4844			mark_reg_unknown(env, regs, value_regno);
4845		} else {
4846			mark_reg_known_zero(env, regs,
4847					    value_regno);
4848			if (type_may_be_null(reg_type))
4849				regs[value_regno].id = ++env->id_gen;
4850			/* A load of a ctx field could have a different actual
4851			 * load size than the one encoded in the insn. When the
4852			 * dst is a PTR, it is definitely not a sub-register.
4853			 */
4854
4855			regs[value_regno].subreg_def = DEF_NOT_SUBREG;
4856			if (base_type(reg_type) == PTR_TO_BTF_ID) {
4857				regs[value_regno].btf = btf;
4858				regs[value_regno].btf_id = btf_id;
4859			}
4860		}
4861		regs[value_regno].type = reg_type;
4862	}
4863
4864 } else if (reg->type == PTR_TO_STACK) {
4865	/* Basic bounds checks.
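 * e.g. *(u64 *)(r10 + 8) = r1 is rejected outright, since for a direct
 * write the valid offsets lie in [-MAX_BPF_STACK, -1]
 * (see check_stack_slot_within_bounds()).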
*/ 4866 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); 4867 if (err) 4868 return err; 4869 4870 state = func(env, reg); 4871 err = update_stack_depth(env, state, off); 4872 if (err) 4873 return err; 4874 4875 if (t == BPF_READ) 4876 err = check_stack_read(env, regno, off, size, 4877 value_regno); 4878 else 4879 err = check_stack_write(env, regno, off, size, 4880 value_regno, insn_idx); 4881 } else if (reg_is_pkt_pointer(reg)) { 4882 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 4883 verbose(env, "cannot write into packet\n"); 4884 return -EACCES; 4885 } 4886 if (t == BPF_WRITE && value_regno >= 0 && 4887 is_pointer_value(env, value_regno)) { 4888 verbose(env, "R%d leaks addr into packet\n", 4889 value_regno); 4890 return -EACCES; 4891 } 4892 err = check_packet_access(env, regno, off, size, false); 4893 if (!err && t == BPF_READ && value_regno >= 0) 4894 mark_reg_unknown(env, regs, value_regno); 4895 } else if (reg->type == PTR_TO_FLOW_KEYS) { 4896 if (t == BPF_WRITE && value_regno >= 0 && 4897 is_pointer_value(env, value_regno)) { 4898 verbose(env, "R%d leaks addr into flow keys\n", 4899 value_regno); 4900 return -EACCES; 4901 } 4902 4903 err = check_flow_keys_access(env, off, size); 4904 if (!err && t == BPF_READ && value_regno >= 0) 4905 mark_reg_unknown(env, regs, value_regno); 4906 } else if (type_is_sk_pointer(reg->type)) { 4907 if (t == BPF_WRITE) { 4908 verbose(env, "R%d cannot write into %s\n", 4909 regno, reg_type_str(env, reg->type)); 4910 return -EACCES; 4911 } 4912 err = check_sock_access(env, insn_idx, regno, off, size, t); 4913 if (!err && value_regno >= 0) 4914 mark_reg_unknown(env, regs, value_regno); 4915 } else if (reg->type == PTR_TO_TP_BUFFER) { 4916 err = check_tp_buffer_access(env, reg, regno, off, size); 4917 if (!err && t == BPF_READ && value_regno >= 0) 4918 mark_reg_unknown(env, regs, value_regno); 4919 } else if (base_type(reg->type) == PTR_TO_BTF_ID && 4920 !type_may_be_null(reg->type)) { 4921 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 4922 value_regno); 4923 } else if (reg->type == CONST_PTR_TO_MAP) { 4924 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 4925 value_regno); 4926 } else if (base_type(reg->type) == PTR_TO_BUF) { 4927 bool rdonly_mem = type_is_rdonly_mem(reg->type); 4928 u32 *max_access; 4929 4930 if (rdonly_mem) { 4931 if (t == BPF_WRITE) { 4932 verbose(env, "R%d cannot write into %s\n", 4933 regno, reg_type_str(env, reg->type)); 4934 return -EACCES; 4935 } 4936 max_access = &env->prog->aux->max_rdonly_access; 4937 } else { 4938 max_access = &env->prog->aux->max_rdwr_access; 4939 } 4940 4941 err = check_buffer_access(env, reg, regno, off, size, false, 4942 max_access); 4943 4944 if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ)) 4945 mark_reg_unknown(env, regs, value_regno); 4946 } else { 4947 verbose(env, "R%d invalid mem access '%s'\n", regno, 4948 reg_type_str(env, reg->type)); 4949 return -EACCES; 4950 } 4951 4952 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 4953 regs[value_regno].type == SCALAR_VALUE) { 4954 /* b/h/w load zero-extends, mark upper bits as known 0 */ 4955 coerce_reg_to_size(®s[value_regno], size); 4956 } 4957 return err; 4958 } 4959 4960 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 4961 { 4962 int load_reg; 4963 int err; 4964 4965 switch (insn->imm) { 4966 case BPF_ADD: 4967 case BPF_ADD | BPF_FETCH: 4968 case BPF_AND: 4969 case BPF_AND | BPF_FETCH: 4970 case 
BPF_OR: 4971 case BPF_OR | BPF_FETCH: 4972 case BPF_XOR: 4973 case BPF_XOR | BPF_FETCH: 4974 case BPF_XCHG: 4975 case BPF_CMPXCHG: 4976 break; 4977 default: 4978 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); 4979 return -EINVAL; 4980 } 4981 4982 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { 4983 verbose(env, "invalid atomic operand size\n"); 4984 return -EINVAL; 4985 } 4986 4987 /* check src1 operand */ 4988 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4989 if (err) 4990 return err; 4991 4992 /* check src2 operand */ 4993 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4994 if (err) 4995 return err; 4996 4997 if (insn->imm == BPF_CMPXCHG) { 4998 /* Check comparison of R0 with memory location */ 4999 const u32 aux_reg = BPF_REG_0; 5000 5001 err = check_reg_arg(env, aux_reg, SRC_OP); 5002 if (err) 5003 return err; 5004 5005 if (is_pointer_value(env, aux_reg)) { 5006 verbose(env, "R%d leaks addr into mem\n", aux_reg); 5007 return -EACCES; 5008 } 5009 } 5010 5011 if (is_pointer_value(env, insn->src_reg)) { 5012 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 5013 return -EACCES; 5014 } 5015 5016 if (is_ctx_reg(env, insn->dst_reg) || 5017 is_pkt_reg(env, insn->dst_reg) || 5018 is_flow_key_reg(env, insn->dst_reg) || 5019 is_sk_reg(env, insn->dst_reg)) { 5020 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", 5021 insn->dst_reg, 5022 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); 5023 return -EACCES; 5024 } 5025 5026 if (insn->imm & BPF_FETCH) { 5027 if (insn->imm == BPF_CMPXCHG) 5028 load_reg = BPF_REG_0; 5029 else 5030 load_reg = insn->src_reg; 5031 5032 /* check and record load of old value */ 5033 err = check_reg_arg(env, load_reg, DST_OP); 5034 if (err) 5035 return err; 5036 } else { 5037 /* This instruction accesses a memory location but doesn't 5038 * actually load it into a register. 5039 */ 5040 load_reg = -1; 5041 } 5042 5043 /* Check whether we can read the memory, with second call for fetch 5044 * case to simulate the register fill. 5045 */ 5046 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 5047 BPF_SIZE(insn->code), BPF_READ, -1, true); 5048 if (!err && load_reg >= 0) 5049 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 5050 BPF_SIZE(insn->code), BPF_READ, load_reg, 5051 true); 5052 if (err) 5053 return err; 5054 5055 /* Check whether we can write into the same memory. */ 5056 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 5057 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 5058 if (err) 5059 return err; 5060 5061 return 0; 5062 } 5063 5064 /* When register 'regno' is used to read the stack (either directly or through 5065 * a helper function) make sure that it's within stack boundary and, depending 5066 * on the access type, that all elements of the stack are initialized. 5067 * 5068 * 'off' includes 'regno->off', but not its dynamic part (if any). 5069 * 5070 * All registers that have been spilled on the stack in the slots within the 5071 * read offsets are marked as read. 5072 */ 5073 static int check_stack_range_initialized( 5074 struct bpf_verifier_env *env, int regno, int off, 5075 int access_size, bool zero_size_allowed, 5076 enum bpf_access_src type, struct bpf_call_arg_meta *meta) 5077 { 5078 struct bpf_reg_state *reg = reg_state(env, regno); 5079 struct bpf_func_state *state = func(env, reg); 5080 int err, min_off, max_off, i, j, slot, spi; 5081 char *err_extra = type == ACCESS_HELPER ? 
" indirect" : ""; 5082 enum bpf_access_type bounds_check_type; 5083 /* Some accesses can write anything into the stack, others are 5084 * read-only. 5085 */ 5086 bool clobber = false; 5087 5088 if (access_size == 0 && !zero_size_allowed) { 5089 verbose(env, "invalid zero-sized read\n"); 5090 return -EACCES; 5091 } 5092 5093 if (type == ACCESS_HELPER) { 5094 /* The bounds checks for writes are more permissive than for 5095 * reads. However, if raw_mode is not set, we'll do extra 5096 * checks below. 5097 */ 5098 bounds_check_type = BPF_WRITE; 5099 clobber = true; 5100 } else { 5101 bounds_check_type = BPF_READ; 5102 } 5103 err = check_stack_access_within_bounds(env, regno, off, access_size, 5104 type, bounds_check_type); 5105 if (err) 5106 return err; 5107 5108 5109 if (tnum_is_const(reg->var_off)) { 5110 min_off = max_off = reg->var_off.value + off; 5111 } else { 5112 /* Variable offset is prohibited for unprivileged mode for 5113 * simplicity since it requires corresponding support in 5114 * Spectre masking for stack ALU. 5115 * See also retrieve_ptr_limit(). 5116 */ 5117 if (!env->bypass_spec_v1) { 5118 char tn_buf[48]; 5119 5120 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5121 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n", 5122 regno, err_extra, tn_buf); 5123 return -EACCES; 5124 } 5125 /* Only initialized buffer on stack is allowed to be accessed 5126 * with variable offset. With uninitialized buffer it's hard to 5127 * guarantee that whole memory is marked as initialized on 5128 * helper return since specific bounds are unknown what may 5129 * cause uninitialized stack leaking. 5130 */ 5131 if (meta && meta->raw_mode) 5132 meta = NULL; 5133 5134 min_off = reg->smin_value + off; 5135 max_off = reg->smax_value + off; 5136 } 5137 5138 if (meta && meta->raw_mode) { 5139 meta->access_size = access_size; 5140 meta->regno = regno; 5141 return 0; 5142 } 5143 5144 for (i = min_off; i < max_off + access_size; i++) { 5145 u8 *stype; 5146 5147 slot = -i - 1; 5148 spi = slot / BPF_REG_SIZE; 5149 if (state->allocated_stack <= slot) 5150 goto err; 5151 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 5152 if (*stype == STACK_MISC) 5153 goto mark; 5154 if (*stype == STACK_ZERO) { 5155 if (clobber) { 5156 /* helper can write anything into the stack */ 5157 *stype = STACK_MISC; 5158 } 5159 goto mark; 5160 } 5161 5162 if (is_spilled_reg(&state->stack[spi]) && 5163 base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID) 5164 goto mark; 5165 5166 if (is_spilled_reg(&state->stack[spi]) && 5167 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || 5168 env->allow_ptr_leaks)) { 5169 if (clobber) { 5170 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 5171 for (j = 0; j < BPF_REG_SIZE; j++) 5172 scrub_spilled_slot(&state->stack[spi].slot_type[j]); 5173 } 5174 goto mark; 5175 } 5176 5177 err: 5178 if (tnum_is_const(reg->var_off)) { 5179 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n", 5180 err_extra, regno, min_off, i - min_off, access_size); 5181 } else { 5182 char tn_buf[48]; 5183 5184 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5185 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n", 5186 err_extra, regno, tn_buf, i - min_off, access_size); 5187 } 5188 return -EACCES; 5189 mark: 5190 /* reading any byte out of 8-byte 'spill_slot' will cause 5191 * the whole slot to be marked as 'read' 5192 */ 5193 mark_reg_read(env, &state->stack[spi].spilled_ptr, 5194 state->stack[spi].spilled_ptr.parent, 5195 
			      REG_LIVE_READ64);
5196	}
5197	return update_stack_depth(env, state, min_off);
5198 }
5199
5200 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5201				   int access_size, bool zero_size_allowed,
5202				   struct bpf_call_arg_meta *meta)
5203 {
5204	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5205	u32 *max_access;
5206
5207	switch (base_type(reg->type)) {
5208	case PTR_TO_PACKET:
5209	case PTR_TO_PACKET_META:
5210		return check_packet_access(env, regno, reg->off, access_size,
5211					   zero_size_allowed);
5212	case PTR_TO_MAP_KEY:
5213		if (meta && meta->raw_mode) {
5214			verbose(env, "R%d cannot write into %s\n", regno,
5215				reg_type_str(env, reg->type));
5216			return -EACCES;
5217		}
5218		return check_mem_region_access(env, regno, reg->off, access_size,
5219					       reg->map_ptr->key_size, false);
5220	case PTR_TO_MAP_VALUE:
5221		if (check_map_access_type(env, regno, reg->off, access_size,
5222					  meta && meta->raw_mode ? BPF_WRITE :
5223					  BPF_READ))
5224			return -EACCES;
5225		return check_map_access(env, regno, reg->off, access_size,
5226					zero_size_allowed, ACCESS_HELPER);
5227	case PTR_TO_MEM:
5228		if (type_is_rdonly_mem(reg->type)) {
5229			if (meta && meta->raw_mode) {
5230				verbose(env, "R%d cannot write into %s\n", regno,
5231					reg_type_str(env, reg->type));
5232				return -EACCES;
5233			}
5234		}
5235		return check_mem_region_access(env, regno, reg->off,
5236					       access_size, reg->mem_size,
5237					       zero_size_allowed);
5238	case PTR_TO_BUF:
5239		if (type_is_rdonly_mem(reg->type)) {
5240			if (meta && meta->raw_mode) {
5241				verbose(env, "R%d cannot write into %s\n", regno,
5242					reg_type_str(env, reg->type));
5243				return -EACCES;
5244			}
5245
5246			max_access = &env->prog->aux->max_rdonly_access;
5247		} else {
5248			max_access = &env->prog->aux->max_rdwr_access;
5249		}
5250		return check_buffer_access(env, reg, regno, reg->off,
5251					   access_size, zero_size_allowed,
5252					   max_access);
5253	case PTR_TO_STACK:
5254		return check_stack_range_initialized(
5255				env,
5256				regno, reg->off, access_size,
5257				zero_size_allowed, ACCESS_HELPER, meta);
5258	case PTR_TO_CTX:
5259		/* In case the function doesn't know how to access the context
5260		 * (because we are in a program of type SYSCALL, for example),
5261		 * we cannot statically check its size.
5262		 * Dynamically check it now.
5263		 */
5264		if (!env->ops->convert_ctx_access) {
5265			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
5266			int offset = access_size - 1;
5267
5268			/* Allow zero-byte read from PTR_TO_CTX */
5269			if (access_size == 0)
5270				return zero_size_allowed ? 0 : -EACCES;
5271
5272			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
5273						atype, -1, false);
5274		}
5275
5276		fallthrough;
5277	default: /* scalar_value or invalid ptr */
5278		/* Allow zero-byte read from NULL, regardless of pointer type */
5279		if (zero_size_allowed && access_size == 0 &&
5280		    register_is_null(reg))
5281			return 0;
5282
5283		verbose(env, "R%d type=%s ", regno,
5284			reg_type_str(env, reg->type));
5285		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
5286		return -EACCES;
5287	}
5288 }
5289
5290 static int check_mem_size_reg(struct bpf_verifier_env *env,
5291			      struct bpf_reg_state *reg, u32 regno,
5292			      bool zero_size_allowed,
5293			      struct bpf_call_arg_meta *meta)
5294 {
5295	int err;
5296
5297	/* This is used to refine r0 return value bounds for helpers
5298	 * that enforce this value as an upper bound on return values.
5299	 * See do_refine_retval_range() for helpers that can refine
5300	 * the return value.
C type of helper is u32 so we pull register 5301 * bound from umax_value however, if negative verifier errors 5302 * out. Only upper bounds can be learned because retval is an 5303 * int type and negative retvals are allowed. 5304 */ 5305 meta->msize_max_value = reg->umax_value; 5306 5307 /* The register is SCALAR_VALUE; the access check 5308 * happens using its boundaries. 5309 */ 5310 if (!tnum_is_const(reg->var_off)) 5311 /* For unprivileged variable accesses, disable raw 5312 * mode so that the program is required to 5313 * initialize all the memory that the helper could 5314 * just partially fill up. 5315 */ 5316 meta = NULL; 5317 5318 if (reg->smin_value < 0) { 5319 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 5320 regno); 5321 return -EACCES; 5322 } 5323 5324 if (reg->umin_value == 0) { 5325 err = check_helper_mem_access(env, regno - 1, 0, 5326 zero_size_allowed, 5327 meta); 5328 if (err) 5329 return err; 5330 } 5331 5332 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 5333 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 5334 regno); 5335 return -EACCES; 5336 } 5337 err = check_helper_mem_access(env, regno - 1, 5338 reg->umax_value, 5339 zero_size_allowed, meta); 5340 if (!err) 5341 err = mark_chain_precision(env, regno); 5342 return err; 5343 } 5344 5345 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 5346 u32 regno, u32 mem_size) 5347 { 5348 bool may_be_null = type_may_be_null(reg->type); 5349 struct bpf_reg_state saved_reg; 5350 struct bpf_call_arg_meta meta; 5351 int err; 5352 5353 if (register_is_null(reg)) 5354 return 0; 5355 5356 memset(&meta, 0, sizeof(meta)); 5357 /* Assuming that the register contains a value check if the memory 5358 * access is safe. Temporarily save and restore the register's state as 5359 * the conversion shouldn't be visible to a caller. 5360 */ 5361 if (may_be_null) { 5362 saved_reg = *reg; 5363 mark_ptr_not_null_reg(reg); 5364 } 5365 5366 err = check_helper_mem_access(env, regno, mem_size, true, &meta); 5367 /* Check access for BPF_WRITE */ 5368 meta.raw_mode = true; 5369 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta); 5370 5371 if (may_be_null) 5372 *reg = saved_reg; 5373 5374 return err; 5375 } 5376 5377 int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 5378 u32 regno) 5379 { 5380 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; 5381 bool may_be_null = type_may_be_null(mem_reg->type); 5382 struct bpf_reg_state saved_reg; 5383 struct bpf_call_arg_meta meta; 5384 int err; 5385 5386 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); 5387 5388 memset(&meta, 0, sizeof(meta)); 5389 5390 if (may_be_null) { 5391 saved_reg = *mem_reg; 5392 mark_ptr_not_null_reg(mem_reg); 5393 } 5394 5395 err = check_mem_size_reg(env, reg, regno, true, &meta); 5396 /* Check access for BPF_WRITE */ 5397 meta.raw_mode = true; 5398 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta); 5399 5400 if (may_be_null) 5401 *mem_reg = saved_reg; 5402 return err; 5403 } 5404 5405 /* Implementation details: 5406 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL 5407 * Two bpf_map_lookups (even with the same key) will have different reg->id. 5408 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after 5409 * value_or_null->value transition, since the verifier only cares about 5410 * the range of access to valid map value pointer and doesn't care about actual 5411 * address of the map element. 
5412 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 5413 * reg->id > 0 after value_or_null->value transition. By doing so 5414 * two bpf_map_lookups will be considered two different pointers that 5415 * point to different bpf_spin_locks. 5416 * The verifier allows taking only one bpf_spin_lock at a time to avoid 5417 * dead-locks. 5418 * Since only one bpf_spin_lock is allowed the checks are simpler than 5419 * reg_is_refcounted() logic. The verifier needs to remember only 5420 * one spin_lock instead of array of acquired_refs. 5421 * cur_state->active_spin_lock remembers which map value element got locked 5422 * and clears it after bpf_spin_unlock. 5423 */ 5424 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 5425 bool is_lock) 5426 { 5427 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5428 struct bpf_verifier_state *cur = env->cur_state; 5429 bool is_const = tnum_is_const(reg->var_off); 5430 struct bpf_map *map = reg->map_ptr; 5431 u64 val = reg->var_off.value; 5432 5433 if (!is_const) { 5434 verbose(env, 5435 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 5436 regno); 5437 return -EINVAL; 5438 } 5439 if (!map->btf) { 5440 verbose(env, 5441 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 5442 map->name); 5443 return -EINVAL; 5444 } 5445 if (!map_value_has_spin_lock(map)) { 5446 if (map->spin_lock_off == -E2BIG) 5447 verbose(env, 5448 "map '%s' has more than one 'struct bpf_spin_lock'\n", 5449 map->name); 5450 else if (map->spin_lock_off == -ENOENT) 5451 verbose(env, 5452 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 5453 map->name); 5454 else 5455 verbose(env, 5456 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 5457 map->name); 5458 return -EINVAL; 5459 } 5460 if (map->spin_lock_off != val + reg->off) { 5461 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 5462 val + reg->off); 5463 return -EINVAL; 5464 } 5465 if (is_lock) { 5466 if (cur->active_spin_lock) { 5467 verbose(env, 5468 "Locking two bpf_spin_locks are not allowed\n"); 5469 return -EINVAL; 5470 } 5471 cur->active_spin_lock = reg->id; 5472 } else { 5473 if (!cur->active_spin_lock) { 5474 verbose(env, "bpf_spin_unlock without taking a lock\n"); 5475 return -EINVAL; 5476 } 5477 if (cur->active_spin_lock != reg->id) { 5478 verbose(env, "bpf_spin_unlock of different lock\n"); 5479 return -EINVAL; 5480 } 5481 cur->active_spin_lock = 0; 5482 } 5483 return 0; 5484 } 5485 5486 static int process_timer_func(struct bpf_verifier_env *env, int regno, 5487 struct bpf_call_arg_meta *meta) 5488 { 5489 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5490 bool is_const = tnum_is_const(reg->var_off); 5491 struct bpf_map *map = reg->map_ptr; 5492 u64 val = reg->var_off.value; 5493 5494 if (!is_const) { 5495 verbose(env, 5496 "R%d doesn't have constant offset. 
bpf_timer has to be at the constant offset\n", 5497 regno); 5498 return -EINVAL; 5499 } 5500 if (!map->btf) { 5501 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", 5502 map->name); 5503 return -EINVAL; 5504 } 5505 if (!map_value_has_timer(map)) { 5506 if (map->timer_off == -E2BIG) 5507 verbose(env, 5508 "map '%s' has more than one 'struct bpf_timer'\n", 5509 map->name); 5510 else if (map->timer_off == -ENOENT) 5511 verbose(env, 5512 "map '%s' doesn't have 'struct bpf_timer'\n", 5513 map->name); 5514 else 5515 verbose(env, 5516 "map '%s' is not a struct type or bpf_timer is mangled\n", 5517 map->name); 5518 return -EINVAL; 5519 } 5520 if (map->timer_off != val + reg->off) { 5521 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", 5522 val + reg->off, map->timer_off); 5523 return -EINVAL; 5524 } 5525 if (meta->map_ptr) { 5526 verbose(env, "verifier bug. Two map pointers in a timer helper\n"); 5527 return -EFAULT; 5528 } 5529 meta->map_uid = reg->map_uid; 5530 meta->map_ptr = map; 5531 return 0; 5532 } 5533 5534 static int process_kptr_func(struct bpf_verifier_env *env, int regno, 5535 struct bpf_call_arg_meta *meta) 5536 { 5537 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5538 struct bpf_map_value_off_desc *off_desc; 5539 struct bpf_map *map_ptr = reg->map_ptr; 5540 u32 kptr_off; 5541 int ret; 5542 5543 if (!tnum_is_const(reg->var_off)) { 5544 verbose(env, 5545 "R%d doesn't have constant offset. kptr has to be at the constant offset\n", 5546 regno); 5547 return -EINVAL; 5548 } 5549 if (!map_ptr->btf) { 5550 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", 5551 map_ptr->name); 5552 return -EINVAL; 5553 } 5554 if (!map_value_has_kptrs(map_ptr)) { 5555 ret = PTR_ERR_OR_ZERO(map_ptr->kptr_off_tab); 5556 if (ret == -E2BIG) 5557 verbose(env, "map '%s' has more than %d kptr\n", map_ptr->name, 5558 BPF_MAP_VALUE_OFF_MAX); 5559 else if (ret == -EEXIST) 5560 verbose(env, "map '%s' has repeating kptr BTF tags\n", map_ptr->name); 5561 else 5562 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); 5563 return -EINVAL; 5564 } 5565 5566 meta->map_ptr = map_ptr; 5567 kptr_off = reg->off + reg->var_off.value; 5568 off_desc = bpf_map_kptr_off_contains(map_ptr, kptr_off); 5569 if (!off_desc) { 5570 verbose(env, "off=%d doesn't point to kptr\n", kptr_off); 5571 return -EACCES; 5572 } 5573 if (off_desc->type != BPF_KPTR_REF) { 5574 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off); 5575 return -EACCES; 5576 } 5577 meta->kptr_off_desc = off_desc; 5578 return 0; 5579 } 5580 5581 static bool arg_type_is_mem_size(enum bpf_arg_type type) 5582 { 5583 return type == ARG_CONST_SIZE || 5584 type == ARG_CONST_SIZE_OR_ZERO; 5585 } 5586 5587 static bool arg_type_is_release(enum bpf_arg_type type) 5588 { 5589 return type & OBJ_RELEASE; 5590 } 5591 5592 static bool arg_type_is_dynptr(enum bpf_arg_type type) 5593 { 5594 return base_type(type) == ARG_PTR_TO_DYNPTR; 5595 } 5596 5597 static int int_ptr_type_to_size(enum bpf_arg_type type) 5598 { 5599 if (type == ARG_PTR_TO_INT) 5600 return sizeof(u32); 5601 else if (type == ARG_PTR_TO_LONG) 5602 return sizeof(u64); 5603 5604 return -EINVAL; 5605 } 5606 5607 static int resolve_map_arg_type(struct bpf_verifier_env *env, 5608 const struct bpf_call_arg_meta *meta, 5609 enum bpf_arg_type *arg_type) 5610 { 5611 if (!meta->map_ptr) { 5612 /* kernel subsystem misconfigured verifier */ 5613 verbose(env, "invalid map_ptr to access map->type\n"); 5614 return -EACCES; 5615 } 5616 5617 
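	/* Sketch of why the remap below is needed (map and variable names are
	 * illustrative): for sockmap/sockhash the "value" of an update is a
	 * socket, not ordinary map memory, e.g.
	 *
	 *     struct bpf_sock *sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple),
	 *                                             BPF_F_CURRENT_NETNS, 0);
	 *     if (sk) {
	 *             bpf_map_update_elem(&sock_map, &key, sk, BPF_ANY);
	 *             bpf_sk_release(sk);
	 *     }
	 *
	 * so ARG_PTR_TO_MAP_VALUE is rewritten to ARG_PTR_TO_BTF_ID_SOCK_COMMON
	 * before the argument is checked.
	 */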
switch (meta->map_ptr->map_type) { 5618 case BPF_MAP_TYPE_SOCKMAP: 5619 case BPF_MAP_TYPE_SOCKHASH: 5620 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 5621 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 5622 } else { 5623 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 5624 return -EINVAL; 5625 } 5626 break; 5627 case BPF_MAP_TYPE_BLOOM_FILTER: 5628 if (meta->func_id == BPF_FUNC_map_peek_elem) 5629 *arg_type = ARG_PTR_TO_MAP_VALUE; 5630 break; 5631 default: 5632 break; 5633 } 5634 return 0; 5635 } 5636 5637 struct bpf_reg_types { 5638 const enum bpf_reg_type types[10]; 5639 u32 *btf_id; 5640 }; 5641 5642 static const struct bpf_reg_types map_key_value_types = { 5643 .types = { 5644 PTR_TO_STACK, 5645 PTR_TO_PACKET, 5646 PTR_TO_PACKET_META, 5647 PTR_TO_MAP_KEY, 5648 PTR_TO_MAP_VALUE, 5649 }, 5650 }; 5651 5652 static const struct bpf_reg_types sock_types = { 5653 .types = { 5654 PTR_TO_SOCK_COMMON, 5655 PTR_TO_SOCKET, 5656 PTR_TO_TCP_SOCK, 5657 PTR_TO_XDP_SOCK, 5658 }, 5659 }; 5660 5661 #ifdef CONFIG_NET 5662 static const struct bpf_reg_types btf_id_sock_common_types = { 5663 .types = { 5664 PTR_TO_SOCK_COMMON, 5665 PTR_TO_SOCKET, 5666 PTR_TO_TCP_SOCK, 5667 PTR_TO_XDP_SOCK, 5668 PTR_TO_BTF_ID, 5669 }, 5670 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5671 }; 5672 #endif 5673 5674 static const struct bpf_reg_types mem_types = { 5675 .types = { 5676 PTR_TO_STACK, 5677 PTR_TO_PACKET, 5678 PTR_TO_PACKET_META, 5679 PTR_TO_MAP_KEY, 5680 PTR_TO_MAP_VALUE, 5681 PTR_TO_MEM, 5682 PTR_TO_MEM | MEM_ALLOC, 5683 PTR_TO_BUF, 5684 }, 5685 }; 5686 5687 static const struct bpf_reg_types int_ptr_types = { 5688 .types = { 5689 PTR_TO_STACK, 5690 PTR_TO_PACKET, 5691 PTR_TO_PACKET_META, 5692 PTR_TO_MAP_KEY, 5693 PTR_TO_MAP_VALUE, 5694 }, 5695 }; 5696 5697 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; 5698 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; 5699 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; 5700 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } }; 5701 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; 5702 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } }; 5703 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } }; 5704 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } }; 5705 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; 5706 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; 5707 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; 5708 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; 5709 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } }; 5710 static const struct bpf_reg_types dynptr_types = { 5711 .types = { 5712 PTR_TO_STACK, 5713 PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL, 5714 } 5715 }; 5716 5717 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { 5718 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, 5719 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types, 5720 [ARG_CONST_SIZE] = &scalar_types, 5721 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, 5722 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, 5723 [ARG_CONST_MAP_PTR] = &const_map_ptr_types, 5724 [ARG_PTR_TO_CTX] = &context_types, 5725 [ARG_PTR_TO_SOCK_COMMON] = 
&sock_types, 5726 #ifdef CONFIG_NET 5727 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, 5728 #endif 5729 [ARG_PTR_TO_SOCKET] = &fullsock_types, 5730 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, 5731 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, 5732 [ARG_PTR_TO_MEM] = &mem_types, 5733 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types, 5734 [ARG_PTR_TO_INT] = &int_ptr_types, 5735 [ARG_PTR_TO_LONG] = &int_ptr_types, 5736 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, 5737 [ARG_PTR_TO_FUNC] = &func_ptr_types, 5738 [ARG_PTR_TO_STACK] = &stack_ptr_types, 5739 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, 5740 [ARG_PTR_TO_TIMER] = &timer_types, 5741 [ARG_PTR_TO_KPTR] = &kptr_types, 5742 [ARG_PTR_TO_DYNPTR] = &dynptr_types, 5743 }; 5744 5745 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, 5746 enum bpf_arg_type arg_type, 5747 const u32 *arg_btf_id, 5748 struct bpf_call_arg_meta *meta) 5749 { 5750 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5751 enum bpf_reg_type expected, type = reg->type; 5752 const struct bpf_reg_types *compatible; 5753 int i, j; 5754 5755 compatible = compatible_reg_types[base_type(arg_type)]; 5756 if (!compatible) { 5757 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); 5758 return -EFAULT; 5759 } 5760 5761 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY, 5762 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY 5763 * 5764 * Same for MAYBE_NULL: 5765 * 5766 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL, 5767 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL 5768 * 5769 * Therefore we fold these flags depending on the arg_type before comparison. 5770 */ 5771 if (arg_type & MEM_RDONLY) 5772 type &= ~MEM_RDONLY; 5773 if (arg_type & PTR_MAYBE_NULL) 5774 type &= ~PTR_MAYBE_NULL; 5775 5776 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { 5777 expected = compatible->types[i]; 5778 if (expected == NOT_INIT) 5779 break; 5780 5781 if (type == expected) 5782 goto found; 5783 } 5784 5785 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); 5786 for (j = 0; j + 1 < i; j++) 5787 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); 5788 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); 5789 return -EACCES; 5790 5791 found: 5792 if (reg->type == PTR_TO_BTF_ID) { 5793 /* For bpf_sk_release, it needs to match against first member 5794 * 'struct sock_common', hence make an exception for it. This 5795 * allows bpf_sk_release to work for multiple socket types. 
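 * E.g. (sketch) a referenced pointer typed as 'struct tcp_sock *' can
 * still be passed to bpf_sk_release(), because 'struct sock_common' is
 * (transitively) the first member of every socket type and the
 * non-strict btf_struct_ids_match() below accepts that embedding.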
5796 */ 5797 bool strict_type_match = arg_type_is_release(arg_type) && 5798 meta->func_id != BPF_FUNC_sk_release; 5799 5800 if (!arg_btf_id) { 5801 if (!compatible->btf_id) { 5802 verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); 5803 return -EFAULT; 5804 } 5805 arg_btf_id = compatible->btf_id; 5806 } 5807 5808 if (meta->func_id == BPF_FUNC_kptr_xchg) { 5809 if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno)) 5810 return -EACCES; 5811 } else { 5812 if (arg_btf_id == BPF_PTR_POISON) { 5813 verbose(env, "verifier internal error:"); 5814 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", 5815 regno); 5816 return -EACCES; 5817 } 5818 5819 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 5820 btf_vmlinux, *arg_btf_id, 5821 strict_type_match)) { 5822 verbose(env, "R%d is of type %s but %s is expected\n", 5823 regno, kernel_type_name(reg->btf, reg->btf_id), 5824 kernel_type_name(btf_vmlinux, *arg_btf_id)); 5825 return -EACCES; 5826 } 5827 } 5828 } 5829 5830 return 0; 5831 } 5832 5833 int check_func_arg_reg_off(struct bpf_verifier_env *env, 5834 const struct bpf_reg_state *reg, int regno, 5835 enum bpf_arg_type arg_type) 5836 { 5837 enum bpf_reg_type type = reg->type; 5838 bool fixed_off_ok = false; 5839 5840 switch ((u32)type) { 5841 /* Pointer types where reg offset is explicitly allowed: */ 5842 case PTR_TO_STACK: 5843 if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) { 5844 verbose(env, "cannot pass in dynptr at an offset\n"); 5845 return -EINVAL; 5846 } 5847 fallthrough; 5848 case PTR_TO_PACKET: 5849 case PTR_TO_PACKET_META: 5850 case PTR_TO_MAP_KEY: 5851 case PTR_TO_MAP_VALUE: 5852 case PTR_TO_MEM: 5853 case PTR_TO_MEM | MEM_RDONLY: 5854 case PTR_TO_MEM | MEM_ALLOC: 5855 case PTR_TO_BUF: 5856 case PTR_TO_BUF | MEM_RDONLY: 5857 case SCALAR_VALUE: 5858 /* Some of the argument types nevertheless require a 5859 * zero register offset. 5860 */ 5861 if (base_type(arg_type) != ARG_PTR_TO_ALLOC_MEM) 5862 return 0; 5863 break; 5864 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows 5865 * fixed offset. 5866 */ 5867 case PTR_TO_BTF_ID: 5868 /* When referenced PTR_TO_BTF_ID is passed to release function, 5869 * it's fixed offset must be 0. In the other cases, fixed offset 5870 * can be non-zero. 5871 */ 5872 if (arg_type_is_release(arg_type) && reg->off) { 5873 verbose(env, "R%d must have zero offset when passed to release func\n", 5874 regno); 5875 return -EINVAL; 5876 } 5877 /* For arg is release pointer, fixed_off_ok must be false, but 5878 * we already checked and rejected reg->off != 0 above, so set 5879 * to true to allow fixed offset for all other cases. 
5880 */ 5881 fixed_off_ok = true; 5882 break; 5883 default: 5884 break; 5885 } 5886 return __check_ptr_off_reg(env, reg, regno, fixed_off_ok); 5887 } 5888 5889 static u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 5890 { 5891 struct bpf_func_state *state = func(env, reg); 5892 int spi = get_spi(reg->off); 5893 5894 return state->stack[spi].spilled_ptr.id; 5895 } 5896 5897 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, 5898 struct bpf_call_arg_meta *meta, 5899 const struct bpf_func_proto *fn) 5900 { 5901 u32 regno = BPF_REG_1 + arg; 5902 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5903 enum bpf_arg_type arg_type = fn->arg_type[arg]; 5904 enum bpf_reg_type type = reg->type; 5905 u32 *arg_btf_id = NULL; 5906 int err = 0; 5907 5908 if (arg_type == ARG_DONTCARE) 5909 return 0; 5910 5911 err = check_reg_arg(env, regno, SRC_OP); 5912 if (err) 5913 return err; 5914 5915 if (arg_type == ARG_ANYTHING) { 5916 if (is_pointer_value(env, regno)) { 5917 verbose(env, "R%d leaks addr into helper function\n", 5918 regno); 5919 return -EACCES; 5920 } 5921 return 0; 5922 } 5923 5924 if (type_is_pkt_pointer(type) && 5925 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 5926 verbose(env, "helper access to the packet is not allowed\n"); 5927 return -EACCES; 5928 } 5929 5930 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) { 5931 err = resolve_map_arg_type(env, meta, &arg_type); 5932 if (err) 5933 return err; 5934 } 5935 5936 if (register_is_null(reg) && type_may_be_null(arg_type)) 5937 /* A NULL register has a SCALAR_VALUE type, so skip 5938 * type checking. 5939 */ 5940 goto skip_type_check; 5941 5942 /* arg_btf_id and arg_size are in a union. */ 5943 if (base_type(arg_type) == ARG_PTR_TO_BTF_ID) 5944 arg_btf_id = fn->arg_btf_id[arg]; 5945 5946 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta); 5947 if (err) 5948 return err; 5949 5950 err = check_func_arg_reg_off(env, reg, regno, arg_type); 5951 if (err) 5952 return err; 5953 5954 skip_type_check: 5955 if (arg_type_is_release(arg_type)) { 5956 if (arg_type_is_dynptr(arg_type)) { 5957 struct bpf_func_state *state = func(env, reg); 5958 int spi = get_spi(reg->off); 5959 5960 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || 5961 !state->stack[spi].spilled_ptr.id) { 5962 verbose(env, "arg %d is an unacquired reference\n", regno); 5963 return -EINVAL; 5964 } 5965 } else if (!reg->ref_obj_id && !register_is_null(reg)) { 5966 verbose(env, "R%d must be referenced when passed to release function\n", 5967 regno); 5968 return -EINVAL; 5969 } 5970 if (meta->release_regno) { 5971 verbose(env, "verifier internal error: more than one release argument\n"); 5972 return -EFAULT; 5973 } 5974 meta->release_regno = regno; 5975 } 5976 5977 if (reg->ref_obj_id) { 5978 if (meta->ref_obj_id) { 5979 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 5980 regno, reg->ref_obj_id, 5981 meta->ref_obj_id); 5982 return -EFAULT; 5983 } 5984 meta->ref_obj_id = reg->ref_obj_id; 5985 } 5986 5987 switch (base_type(arg_type)) { 5988 case ARG_CONST_MAP_PTR: 5989 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 5990 if (meta->map_ptr) { 5991 /* Use map_uid (which is unique id of inner map) to reject: 5992 * inner_map1 = bpf_map_lookup_elem(outer_map, key1) 5993 * inner_map2 = bpf_map_lookup_elem(outer_map, key2) 5994 * if (inner_map1 && inner_map2) { 5995 * timer = bpf_map_lookup_elem(inner_map1); 5996 * if (timer) 5997 * // mismatch would have been allowed 5998 * 
bpf_timer_init(timer, inner_map2); 5999 * } 6000 * 6001 * Comparing map_ptr is enough to distinguish normal and outer maps. 6002 */ 6003 if (meta->map_ptr != reg->map_ptr || 6004 meta->map_uid != reg->map_uid) { 6005 verbose(env, 6006 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", 6007 meta->map_uid, reg->map_uid); 6008 return -EINVAL; 6009 } 6010 } 6011 meta->map_ptr = reg->map_ptr; 6012 meta->map_uid = reg->map_uid; 6013 break; 6014 case ARG_PTR_TO_MAP_KEY: 6015 /* bpf_map_xxx(..., map_ptr, ..., key) call: 6016 * check that [key, key + map->key_size) are within 6017 * stack limits and initialized 6018 */ 6019 if (!meta->map_ptr) { 6020 /* in function declaration map_ptr must come before 6021 * map_key, so that it's verified and known before 6022 * we have to check map_key here. Otherwise it means 6023 * that kernel subsystem misconfigured verifier 6024 */ 6025 verbose(env, "invalid map_ptr to access map->key\n"); 6026 return -EACCES; 6027 } 6028 err = check_helper_mem_access(env, regno, 6029 meta->map_ptr->key_size, false, 6030 NULL); 6031 break; 6032 case ARG_PTR_TO_MAP_VALUE: 6033 if (type_may_be_null(arg_type) && register_is_null(reg)) 6034 return 0; 6035 6036 /* bpf_map_xxx(..., map_ptr, ..., value) call: 6037 * check [value, value + map->value_size) validity 6038 */ 6039 if (!meta->map_ptr) { 6040 /* kernel subsystem misconfigured verifier */ 6041 verbose(env, "invalid map_ptr to access map->value\n"); 6042 return -EACCES; 6043 } 6044 meta->raw_mode = arg_type & MEM_UNINIT; 6045 err = check_helper_mem_access(env, regno, 6046 meta->map_ptr->value_size, false, 6047 meta); 6048 break; 6049 case ARG_PTR_TO_PERCPU_BTF_ID: 6050 if (!reg->btf_id) { 6051 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 6052 return -EACCES; 6053 } 6054 meta->ret_btf = reg->btf; 6055 meta->ret_btf_id = reg->btf_id; 6056 break; 6057 case ARG_PTR_TO_SPIN_LOCK: 6058 if (meta->func_id == BPF_FUNC_spin_lock) { 6059 if (process_spin_lock(env, regno, true)) 6060 return -EACCES; 6061 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 6062 if (process_spin_lock(env, regno, false)) 6063 return -EACCES; 6064 } else { 6065 verbose(env, "verifier internal error\n"); 6066 return -EFAULT; 6067 } 6068 break; 6069 case ARG_PTR_TO_TIMER: 6070 if (process_timer_func(env, regno, meta)) 6071 return -EACCES; 6072 break; 6073 case ARG_PTR_TO_FUNC: 6074 meta->subprogno = reg->subprogno; 6075 break; 6076 case ARG_PTR_TO_MEM: 6077 /* The access to this pointer is only checked when we hit the 6078 * next is_mem_size argument below. 6079 */ 6080 meta->raw_mode = arg_type & MEM_UNINIT; 6081 if (arg_type & MEM_FIXED_SIZE) { 6082 err = check_helper_mem_access(env, regno, 6083 fn->arg_size[arg], false, 6084 meta); 6085 } 6086 break; 6087 case ARG_CONST_SIZE: 6088 err = check_mem_size_reg(env, reg, regno, false, meta); 6089 break; 6090 case ARG_CONST_SIZE_OR_ZERO: 6091 err = check_mem_size_reg(env, reg, regno, true, meta); 6092 break; 6093 case ARG_PTR_TO_DYNPTR: 6094 /* We only need to check for initialized / uninitialized helper 6095 * dynptr args if the dynptr is not PTR_TO_DYNPTR, as the 6096 * assumption is that if it is, that a helper function 6097 * initialized the dynptr on behalf of the BPF program. 
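 * A typical sequence being checked here (sketch; 'rb' is a hypothetical
 * BPF_MAP_TYPE_RINGBUF map):
 *
 *     struct bpf_dynptr ptr;                         // uninitialized stack slots
 *     bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &ptr);  // this arg carries MEM_UNINIT
 *     bpf_dynptr_write(&ptr, 0, buf, 64, 0);         // now must be initialized
 *     bpf_ringbuf_submit_dynptr(&ptr, 0);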
6098 */ 6099 if (base_type(reg->type) == PTR_TO_DYNPTR) 6100 break; 6101 if (arg_type & MEM_UNINIT) { 6102 if (!is_dynptr_reg_valid_uninit(env, reg)) { 6103 verbose(env, "Dynptr has to be an uninitialized dynptr\n"); 6104 return -EINVAL; 6105 } 6106 6107 /* We only support one dynptr being uninitialized at the moment, 6108 * which is sufficient for the helper functions we have right now. 6109 */ 6110 if (meta->uninit_dynptr_regno) { 6111 verbose(env, "verifier internal error: multiple uninitialized dynptr args\n"); 6112 return -EFAULT; 6113 } 6114 6115 meta->uninit_dynptr_regno = regno; 6116 } else if (!is_dynptr_reg_valid_init(env, reg)) { 6117 verbose(env, 6118 "Expected an initialized dynptr as arg #%d\n", 6119 arg + 1); 6120 return -EINVAL; 6121 } else if (!is_dynptr_type_expected(env, reg, arg_type)) { 6122 const char *err_extra = ""; 6123 6124 switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { 6125 case DYNPTR_TYPE_LOCAL: 6126 err_extra = "local"; 6127 break; 6128 case DYNPTR_TYPE_RINGBUF: 6129 err_extra = "ringbuf"; 6130 break; 6131 default: 6132 err_extra = "<unknown>"; 6133 break; 6134 } 6135 verbose(env, 6136 "Expected a dynptr of type %s as arg #%d\n", 6137 err_extra, arg + 1); 6138 return -EINVAL; 6139 } 6140 break; 6141 case ARG_CONST_ALLOC_SIZE_OR_ZERO: 6142 if (!tnum_is_const(reg->var_off)) { 6143 verbose(env, "R%d is not a known constant'\n", 6144 regno); 6145 return -EACCES; 6146 } 6147 meta->mem_size = reg->var_off.value; 6148 err = mark_chain_precision(env, regno); 6149 if (err) 6150 return err; 6151 break; 6152 case ARG_PTR_TO_INT: 6153 case ARG_PTR_TO_LONG: 6154 { 6155 int size = int_ptr_type_to_size(arg_type); 6156 6157 err = check_helper_mem_access(env, regno, size, false, meta); 6158 if (err) 6159 return err; 6160 err = check_ptr_alignment(env, reg, 0, size, true); 6161 break; 6162 } 6163 case ARG_PTR_TO_CONST_STR: 6164 { 6165 struct bpf_map *map = reg->map_ptr; 6166 int map_off; 6167 u64 map_addr; 6168 char *str_ptr; 6169 6170 if (!bpf_map_is_rdonly(map)) { 6171 verbose(env, "R%d does not point to a readonly map'\n", regno); 6172 return -EACCES; 6173 } 6174 6175 if (!tnum_is_const(reg->var_off)) { 6176 verbose(env, "R%d is not a constant address'\n", regno); 6177 return -EACCES; 6178 } 6179 6180 if (!map->ops->map_direct_value_addr) { 6181 verbose(env, "no direct value access support for this map type\n"); 6182 return -EACCES; 6183 } 6184 6185 err = check_map_access(env, regno, reg->off, 6186 map->value_size - reg->off, false, 6187 ACCESS_HELPER); 6188 if (err) 6189 return err; 6190 6191 map_off = reg->off + reg->var_off.value; 6192 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); 6193 if (err) { 6194 verbose(env, "direct value access on string failed\n"); 6195 return err; 6196 } 6197 6198 str_ptr = (char *)(long)(map_addr); 6199 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { 6200 verbose(env, "string is not zero-terminated\n"); 6201 return -EINVAL; 6202 } 6203 break; 6204 } 6205 case ARG_PTR_TO_KPTR: 6206 if (process_kptr_func(env, regno, meta)) 6207 return -EACCES; 6208 break; 6209 } 6210 6211 return err; 6212 } 6213 6214 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 6215 { 6216 enum bpf_attach_type eatype = env->prog->expected_attach_type; 6217 enum bpf_prog_type type = resolve_prog_type(env->prog); 6218 6219 if (func_id != BPF_FUNC_map_update_elem) 6220 return false; 6221 6222 /* It's not possible to get access to a locked struct sock in these 6223 * contexts, so updating is safe. 
6224 */ 6225 switch (type) { 6226 case BPF_PROG_TYPE_TRACING: 6227 if (eatype == BPF_TRACE_ITER) 6228 return true; 6229 break; 6230 case BPF_PROG_TYPE_SOCKET_FILTER: 6231 case BPF_PROG_TYPE_SCHED_CLS: 6232 case BPF_PROG_TYPE_SCHED_ACT: 6233 case BPF_PROG_TYPE_XDP: 6234 case BPF_PROG_TYPE_SK_REUSEPORT: 6235 case BPF_PROG_TYPE_FLOW_DISSECTOR: 6236 case BPF_PROG_TYPE_SK_LOOKUP: 6237 return true; 6238 default: 6239 break; 6240 } 6241 6242 verbose(env, "cannot update sockmap in this context\n"); 6243 return false; 6244 } 6245 6246 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 6247 { 6248 return env->prog->jit_requested && 6249 bpf_jit_supports_subprog_tailcalls(); 6250 } 6251 6252 static int check_map_func_compatibility(struct bpf_verifier_env *env, 6253 struct bpf_map *map, int func_id) 6254 { 6255 if (!map) 6256 return 0; 6257 6258 /* We need a two way check, first is from map perspective ... */ 6259 switch (map->map_type) { 6260 case BPF_MAP_TYPE_PROG_ARRAY: 6261 if (func_id != BPF_FUNC_tail_call) 6262 goto error; 6263 break; 6264 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 6265 if (func_id != BPF_FUNC_perf_event_read && 6266 func_id != BPF_FUNC_perf_event_output && 6267 func_id != BPF_FUNC_skb_output && 6268 func_id != BPF_FUNC_perf_event_read_value && 6269 func_id != BPF_FUNC_xdp_output) 6270 goto error; 6271 break; 6272 case BPF_MAP_TYPE_RINGBUF: 6273 if (func_id != BPF_FUNC_ringbuf_output && 6274 func_id != BPF_FUNC_ringbuf_reserve && 6275 func_id != BPF_FUNC_ringbuf_query && 6276 func_id != BPF_FUNC_ringbuf_reserve_dynptr && 6277 func_id != BPF_FUNC_ringbuf_submit_dynptr && 6278 func_id != BPF_FUNC_ringbuf_discard_dynptr) 6279 goto error; 6280 break; 6281 case BPF_MAP_TYPE_USER_RINGBUF: 6282 if (func_id != BPF_FUNC_user_ringbuf_drain) 6283 goto error; 6284 break; 6285 case BPF_MAP_TYPE_STACK_TRACE: 6286 if (func_id != BPF_FUNC_get_stackid) 6287 goto error; 6288 break; 6289 case BPF_MAP_TYPE_CGROUP_ARRAY: 6290 if (func_id != BPF_FUNC_skb_under_cgroup && 6291 func_id != BPF_FUNC_current_task_under_cgroup) 6292 goto error; 6293 break; 6294 case BPF_MAP_TYPE_CGROUP_STORAGE: 6295 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 6296 if (func_id != BPF_FUNC_get_local_storage) 6297 goto error; 6298 break; 6299 case BPF_MAP_TYPE_DEVMAP: 6300 case BPF_MAP_TYPE_DEVMAP_HASH: 6301 if (func_id != BPF_FUNC_redirect_map && 6302 func_id != BPF_FUNC_map_lookup_elem) 6303 goto error; 6304 break; 6305 /* Restrict bpf side of cpumap and xskmap, open when use-cases 6306 * appear. 
6307 */ 6308 case BPF_MAP_TYPE_CPUMAP: 6309 if (func_id != BPF_FUNC_redirect_map) 6310 goto error; 6311 break; 6312 case BPF_MAP_TYPE_XSKMAP: 6313 if (func_id != BPF_FUNC_redirect_map && 6314 func_id != BPF_FUNC_map_lookup_elem) 6315 goto error; 6316 break; 6317 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 6318 case BPF_MAP_TYPE_HASH_OF_MAPS: 6319 if (func_id != BPF_FUNC_map_lookup_elem) 6320 goto error; 6321 break; 6322 case BPF_MAP_TYPE_SOCKMAP: 6323 if (func_id != BPF_FUNC_sk_redirect_map && 6324 func_id != BPF_FUNC_sock_map_update && 6325 func_id != BPF_FUNC_map_delete_elem && 6326 func_id != BPF_FUNC_msg_redirect_map && 6327 func_id != BPF_FUNC_sk_select_reuseport && 6328 func_id != BPF_FUNC_map_lookup_elem && 6329 !may_update_sockmap(env, func_id)) 6330 goto error; 6331 break; 6332 case BPF_MAP_TYPE_SOCKHASH: 6333 if (func_id != BPF_FUNC_sk_redirect_hash && 6334 func_id != BPF_FUNC_sock_hash_update && 6335 func_id != BPF_FUNC_map_delete_elem && 6336 func_id != BPF_FUNC_msg_redirect_hash && 6337 func_id != BPF_FUNC_sk_select_reuseport && 6338 func_id != BPF_FUNC_map_lookup_elem && 6339 !may_update_sockmap(env, func_id)) 6340 goto error; 6341 break; 6342 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 6343 if (func_id != BPF_FUNC_sk_select_reuseport) 6344 goto error; 6345 break; 6346 case BPF_MAP_TYPE_QUEUE: 6347 case BPF_MAP_TYPE_STACK: 6348 if (func_id != BPF_FUNC_map_peek_elem && 6349 func_id != BPF_FUNC_map_pop_elem && 6350 func_id != BPF_FUNC_map_push_elem) 6351 goto error; 6352 break; 6353 case BPF_MAP_TYPE_SK_STORAGE: 6354 if (func_id != BPF_FUNC_sk_storage_get && 6355 func_id != BPF_FUNC_sk_storage_delete) 6356 goto error; 6357 break; 6358 case BPF_MAP_TYPE_INODE_STORAGE: 6359 if (func_id != BPF_FUNC_inode_storage_get && 6360 func_id != BPF_FUNC_inode_storage_delete) 6361 goto error; 6362 break; 6363 case BPF_MAP_TYPE_TASK_STORAGE: 6364 if (func_id != BPF_FUNC_task_storage_get && 6365 func_id != BPF_FUNC_task_storage_delete) 6366 goto error; 6367 break; 6368 case BPF_MAP_TYPE_BLOOM_FILTER: 6369 if (func_id != BPF_FUNC_map_peek_elem && 6370 func_id != BPF_FUNC_map_push_elem) 6371 goto error; 6372 break; 6373 default: 6374 break; 6375 } 6376 6377 /* ... and second from the function itself. 
*/ 6378 switch (func_id) { 6379 case BPF_FUNC_tail_call: 6380 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 6381 goto error; 6382 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 6383 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 6384 return -EINVAL; 6385 } 6386 break; 6387 case BPF_FUNC_perf_event_read: 6388 case BPF_FUNC_perf_event_output: 6389 case BPF_FUNC_perf_event_read_value: 6390 case BPF_FUNC_skb_output: 6391 case BPF_FUNC_xdp_output: 6392 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 6393 goto error; 6394 break; 6395 case BPF_FUNC_ringbuf_output: 6396 case BPF_FUNC_ringbuf_reserve: 6397 case BPF_FUNC_ringbuf_query: 6398 case BPF_FUNC_ringbuf_reserve_dynptr: 6399 case BPF_FUNC_ringbuf_submit_dynptr: 6400 case BPF_FUNC_ringbuf_discard_dynptr: 6401 if (map->map_type != BPF_MAP_TYPE_RINGBUF) 6402 goto error; 6403 break; 6404 case BPF_FUNC_user_ringbuf_drain: 6405 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) 6406 goto error; 6407 break; 6408 case BPF_FUNC_get_stackid: 6409 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 6410 goto error; 6411 break; 6412 case BPF_FUNC_current_task_under_cgroup: 6413 case BPF_FUNC_skb_under_cgroup: 6414 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 6415 goto error; 6416 break; 6417 case BPF_FUNC_redirect_map: 6418 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 6419 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 6420 map->map_type != BPF_MAP_TYPE_CPUMAP && 6421 map->map_type != BPF_MAP_TYPE_XSKMAP) 6422 goto error; 6423 break; 6424 case BPF_FUNC_sk_redirect_map: 6425 case BPF_FUNC_msg_redirect_map: 6426 case BPF_FUNC_sock_map_update: 6427 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 6428 goto error; 6429 break; 6430 case BPF_FUNC_sk_redirect_hash: 6431 case BPF_FUNC_msg_redirect_hash: 6432 case BPF_FUNC_sock_hash_update: 6433 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 6434 goto error; 6435 break; 6436 case BPF_FUNC_get_local_storage: 6437 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 6438 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 6439 goto error; 6440 break; 6441 case BPF_FUNC_sk_select_reuseport: 6442 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 6443 map->map_type != BPF_MAP_TYPE_SOCKMAP && 6444 map->map_type != BPF_MAP_TYPE_SOCKHASH) 6445 goto error; 6446 break; 6447 case BPF_FUNC_map_pop_elem: 6448 if (map->map_type != BPF_MAP_TYPE_QUEUE && 6449 map->map_type != BPF_MAP_TYPE_STACK) 6450 goto error; 6451 break; 6452 case BPF_FUNC_map_peek_elem: 6453 case BPF_FUNC_map_push_elem: 6454 if (map->map_type != BPF_MAP_TYPE_QUEUE && 6455 map->map_type != BPF_MAP_TYPE_STACK && 6456 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) 6457 goto error; 6458 break; 6459 case BPF_FUNC_map_lookup_percpu_elem: 6460 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 6461 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 6462 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) 6463 goto error; 6464 break; 6465 case BPF_FUNC_sk_storage_get: 6466 case BPF_FUNC_sk_storage_delete: 6467 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 6468 goto error; 6469 break; 6470 case BPF_FUNC_inode_storage_get: 6471 case BPF_FUNC_inode_storage_delete: 6472 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 6473 goto error; 6474 break; 6475 case BPF_FUNC_task_storage_get: 6476 case BPF_FUNC_task_storage_delete: 6477 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 6478 goto error; 6479 break; 6480 default: 6481 break; 6482 } 6483 6484 return 0; 6485 error: 6486 verbose(env, "cannot pass map_type %d into func %s#%d\n", 
6487 map->map_type, func_id_name(func_id), func_id); 6488 return -EINVAL; 6489 } 6490 6491 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 6492 { 6493 int count = 0; 6494 6495 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 6496 count++; 6497 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 6498 count++; 6499 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 6500 count++; 6501 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 6502 count++; 6503 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 6504 count++; 6505 6506 /* We only support one arg being in raw mode at the moment, 6507 * which is sufficient for the helper functions we have 6508 * right now. 6509 */ 6510 return count <= 1; 6511 } 6512 6513 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg) 6514 { 6515 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; 6516 bool has_size = fn->arg_size[arg] != 0; 6517 bool is_next_size = false; 6518 6519 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) 6520 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); 6521 6522 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) 6523 return is_next_size; 6524 6525 return has_size == is_next_size || is_next_size == is_fixed; 6526 } 6527 6528 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 6529 { 6530 /* bpf_xxx(..., buf, len) call will access 'len' 6531 * bytes from memory 'buf'. Both arg types need 6532 * to be paired, so make sure there's no buggy 6533 * helper function specification. 6534 */ 6535 if (arg_type_is_mem_size(fn->arg1_type) || 6536 check_args_pair_invalid(fn, 0) || 6537 check_args_pair_invalid(fn, 1) || 6538 check_args_pair_invalid(fn, 2) || 6539 check_args_pair_invalid(fn, 3) || 6540 check_args_pair_invalid(fn, 4)) 6541 return false; 6542 6543 return true; 6544 } 6545 6546 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 6547 { 6548 int i; 6549 6550 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 6551 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) 6552 return false; 6553 6554 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && 6555 /* arg_btf_id and arg_size are in a union. */ 6556 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || 6557 !(fn->arg_type[i] & MEM_FIXED_SIZE))) 6558 return false; 6559 } 6560 6561 return true; 6562 } 6563 6564 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 6565 { 6566 return check_raw_mode_ok(fn) && 6567 check_arg_pair_ok(fn) && 6568 check_btf_id_ok(fn) ? 0 : -EINVAL; 6569 } 6570 6571 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 6572 * are now invalid, so turn them into unknown SCALAR_VALUE. 6573 */ 6574 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 6575 { 6576 struct bpf_func_state *state; 6577 struct bpf_reg_state *reg; 6578 6579 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 6580 if (reg_is_pkt_pointer_any(reg)) 6581 __mark_reg_unknown(env, reg); 6582 })); 6583 } 6584 6585 enum { 6586 AT_PKT_END = -1, 6587 BEYOND_PKT_END = -2, 6588 }; 6589 6590 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 6591 { 6592 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 6593 struct bpf_reg_state *reg = &state->regs[regn]; 6594 6595 if (reg->type != PTR_TO_PACKET) 6596 /* PTR_TO_PACKET_META is not supported yet */ 6597 return; 6598 6599 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 6600 * How far beyond pkt_end it goes is unknown. 
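 * (Such a register typically comes from a guard like
 *      if (data + sizeof(struct ethhdr) > data_end)
 *  taken in its 'true' direction; illustrative only.)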
6601 * if (!range_open) it's the case of pkt >= pkt_end 6602 * if (range_open) it's the case of pkt > pkt_end 6603 * hence this pointer is at least 1 byte bigger than pkt_end 6604 */ 6605 if (range_open) 6606 reg->range = BEYOND_PKT_END; 6607 else 6608 reg->range = AT_PKT_END; 6609 } 6610 6611 /* The pointer with the specified id has released its reference to kernel 6612 * resources. Identify all copies of the same pointer and clear the reference. 6613 */ 6614 static int release_reference(struct bpf_verifier_env *env, 6615 int ref_obj_id) 6616 { 6617 struct bpf_func_state *state; 6618 struct bpf_reg_state *reg; 6619 int err; 6620 6621 err = release_reference_state(cur_func(env), ref_obj_id); 6622 if (err) 6623 return err; 6624 6625 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 6626 if (reg->ref_obj_id == ref_obj_id) { 6627 if (!env->allow_ptr_leaks) 6628 __mark_reg_not_init(env, reg); 6629 else 6630 __mark_reg_unknown(env, reg); 6631 } 6632 })); 6633 6634 return 0; 6635 } 6636 6637 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 6638 struct bpf_reg_state *regs) 6639 { 6640 int i; 6641 6642 /* after the call registers r0 - r5 were scratched */ 6643 for (i = 0; i < CALLER_SAVED_REGS; i++) { 6644 mark_reg_not_init(env, regs, caller_saved[i]); 6645 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 6646 } 6647 } 6648 6649 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, 6650 struct bpf_func_state *caller, 6651 struct bpf_func_state *callee, 6652 int insn_idx); 6653 6654 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 6655 int *insn_idx, int subprog, 6656 set_callee_state_fn set_callee_state_cb) 6657 { 6658 struct bpf_verifier_state *state = env->cur_state; 6659 struct bpf_func_info_aux *func_info_aux; 6660 struct bpf_func_state *caller, *callee; 6661 int err; 6662 bool is_global = false; 6663 6664 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 6665 verbose(env, "the call stack of %d frames is too deep\n", 6666 state->curframe + 2); 6667 return -E2BIG; 6668 } 6669 6670 caller = state->frame[state->curframe]; 6671 if (state->frame[state->curframe + 1]) { 6672 verbose(env, "verifier bug. Frame %d already allocated\n", 6673 state->curframe + 1); 6674 return -EFAULT; 6675 } 6676 6677 func_info_aux = env->prog->aux->func_info_aux; 6678 if (func_info_aux) 6679 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 6680 err = btf_check_subprog_call(env, subprog, caller->regs); 6681 if (err == -EFAULT) 6682 return err; 6683 if (is_global) { 6684 if (err) { 6685 verbose(env, "Caller passes invalid args into func#%d\n", 6686 subprog); 6687 return err; 6688 } else { 6689 if (env->log.level & BPF_LOG_LEVEL) 6690 verbose(env, 6691 "Func#%d is global and valid. Skipping.\n", 6692 subprog); 6693 clear_caller_saved_regs(env, caller->regs); 6694 6695 /* All global functions return a 64-bit SCALAR_VALUE */ 6696 mark_reg_unknown(env, caller->regs, BPF_REG_0); 6697 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 6698 6699 /* continue with next insn after call */ 6700 return 0; 6701 } 6702 } 6703 6704 if (insn->code == (BPF_JMP | BPF_CALL) && 6705 insn->src_reg == 0 && 6706 insn->imm == BPF_FUNC_timer_set_callback) { 6707 struct bpf_verifier_state *async_cb; 6708 6709 /* there is no real recursion here. 
timer callbacks are async */ 6710 env->subprog_info[subprog].is_async_cb = true; 6711 async_cb = push_async_cb(env, env->subprog_info[subprog].start, 6712 *insn_idx, subprog); 6713 if (!async_cb) 6714 return -EFAULT; 6715 callee = async_cb->frame[0]; 6716 callee->async_entry_cnt = caller->async_entry_cnt + 1; 6717 6718 /* Convert bpf_timer_set_callback() args into timer callback args */ 6719 err = set_callee_state_cb(env, caller, callee, *insn_idx); 6720 if (err) 6721 return err; 6722 6723 clear_caller_saved_regs(env, caller->regs); 6724 mark_reg_unknown(env, caller->regs, BPF_REG_0); 6725 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 6726 /* continue with next insn after call */ 6727 return 0; 6728 } 6729 6730 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 6731 if (!callee) 6732 return -ENOMEM; 6733 state->frame[state->curframe + 1] = callee; 6734 6735 /* callee cannot access r0, r6 - r9 for reading and has to write 6736 * into its own stack before reading from it. 6737 * callee can read/write into caller's stack 6738 */ 6739 init_func_state(env, callee, 6740 /* remember the callsite, it will be used by bpf_exit */ 6741 *insn_idx /* callsite */, 6742 state->curframe + 1 /* frameno within this callchain */, 6743 subprog /* subprog number within this prog */); 6744 6745 /* Transfer references to the callee */ 6746 err = copy_reference_state(callee, caller); 6747 if (err) 6748 return err; 6749 6750 err = set_callee_state_cb(env, caller, callee, *insn_idx); 6751 if (err) 6752 return err; 6753 6754 clear_caller_saved_regs(env, caller->regs); 6755 6756 /* only increment it after check_reg_arg() finished */ 6757 state->curframe++; 6758 6759 /* and go analyze first insn of the callee */ 6760 *insn_idx = env->subprog_info[subprog].start - 1; 6761 6762 if (env->log.level & BPF_LOG_LEVEL) { 6763 verbose(env, "caller:\n"); 6764 print_verifier_state(env, caller, true); 6765 verbose(env, "callee:\n"); 6766 print_verifier_state(env, callee, true); 6767 } 6768 return 0; 6769 } 6770 6771 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 6772 struct bpf_func_state *caller, 6773 struct bpf_func_state *callee) 6774 { 6775 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, 6776 * void *callback_ctx, u64 flags); 6777 * callback_fn(struct bpf_map *map, void *key, void *value, 6778 * void *callback_ctx); 6779 */ 6780 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 6781 6782 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 6783 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 6784 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; 6785 6786 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 6787 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 6788 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; 6789 6790 /* pointer to stack or null */ 6791 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; 6792 6793 /* unused */ 6794 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6795 return 0; 6796 } 6797 6798 static int set_callee_state(struct bpf_verifier_env *env, 6799 struct bpf_func_state *caller, 6800 struct bpf_func_state *callee, int insn_idx) 6801 { 6802 int i; 6803 6804 /* copy r1 - r5 args that callee can access. 
The copy includes parent 6805 * pointers, which connects us up to the liveness chain 6806 */ 6807 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 6808 callee->regs[i] = caller->regs[i]; 6809 return 0; 6810 } 6811 6812 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 6813 int *insn_idx) 6814 { 6815 int subprog, target_insn; 6816 6817 target_insn = *insn_idx + insn->imm + 1; 6818 subprog = find_subprog(env, target_insn); 6819 if (subprog < 0) { 6820 verbose(env, "verifier bug. No program starts at insn %d\n", 6821 target_insn); 6822 return -EFAULT; 6823 } 6824 6825 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); 6826 } 6827 6828 static int set_map_elem_callback_state(struct bpf_verifier_env *env, 6829 struct bpf_func_state *caller, 6830 struct bpf_func_state *callee, 6831 int insn_idx) 6832 { 6833 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; 6834 struct bpf_map *map; 6835 int err; 6836 6837 if (bpf_map_ptr_poisoned(insn_aux)) { 6838 verbose(env, "tail_call abusing map_ptr\n"); 6839 return -EINVAL; 6840 } 6841 6842 map = BPF_MAP_PTR(insn_aux->map_ptr_state); 6843 if (!map->ops->map_set_for_each_callback_args || 6844 !map->ops->map_for_each_callback) { 6845 verbose(env, "callback function not allowed for map\n"); 6846 return -ENOTSUPP; 6847 } 6848 6849 err = map->ops->map_set_for_each_callback_args(env, caller, callee); 6850 if (err) 6851 return err; 6852 6853 callee->in_callback_fn = true; 6854 callee->callback_ret_range = tnum_range(0, 1); 6855 return 0; 6856 } 6857 6858 static int set_loop_callback_state(struct bpf_verifier_env *env, 6859 struct bpf_func_state *caller, 6860 struct bpf_func_state *callee, 6861 int insn_idx) 6862 { 6863 /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, 6864 * u64 flags); 6865 * callback_fn(u32 index, void *callback_ctx); 6866 */ 6867 callee->regs[BPF_REG_1].type = SCALAR_VALUE; 6868 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 6869 6870 /* unused */ 6871 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 6872 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 6873 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6874 6875 callee->in_callback_fn = true; 6876 callee->callback_ret_range = tnum_range(0, 1); 6877 return 0; 6878 } 6879 6880 static int set_timer_callback_state(struct bpf_verifier_env *env, 6881 struct bpf_func_state *caller, 6882 struct bpf_func_state *callee, 6883 int insn_idx) 6884 { 6885 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; 6886 6887 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); 6888 * callback_fn(struct bpf_map *map, void *key, void *value); 6889 */ 6890 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; 6891 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); 6892 callee->regs[BPF_REG_1].map_ptr = map_ptr; 6893 6894 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 6895 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 6896 callee->regs[BPF_REG_2].map_ptr = map_ptr; 6897 6898 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 6899 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 6900 callee->regs[BPF_REG_3].map_ptr = map_ptr; 6901 6902 /* unused */ 6903 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 6904 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6905 callee->in_async_callback_fn = true; 6906 callee->callback_ret_range = tnum_range(0, 1); 6907 return 0; 6908 } 6909 6910 static int set_find_vma_callback_state(struct bpf_verifier_env *env, 6911 struct bpf_func_state *caller, 6912 struct 
bpf_func_state *callee, 6913 int insn_idx) 6914 { 6915 /* bpf_find_vma(struct task_struct *task, u64 addr, 6916 * void *callback_fn, void *callback_ctx, u64 flags) 6917 * (callback_fn)(struct task_struct *task, 6918 * struct vm_area_struct *vma, void *callback_ctx); 6919 */ 6920 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 6921 6922 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; 6923 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 6924 callee->regs[BPF_REG_2].btf = btf_vmlinux; 6925 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], 6926 6927 /* pointer to stack or null */ 6928 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; 6929 6930 /* unused */ 6931 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 6932 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6933 callee->in_callback_fn = true; 6934 callee->callback_ret_range = tnum_range(0, 1); 6935 return 0; 6936 } 6937 6938 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, 6939 struct bpf_func_state *caller, 6940 struct bpf_func_state *callee, 6941 int insn_idx) 6942 { 6943 /* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void 6944 * callback_ctx, u64 flags); 6945 * callback_fn(struct bpf_dynptr_t* dynptr, void *callback_ctx); 6946 */ 6947 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); 6948 callee->regs[BPF_REG_1].type = PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL; 6949 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); 6950 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 6951 6952 /* unused */ 6953 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 6954 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 6955 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6956 6957 callee->in_callback_fn = true; 6958 callee->callback_ret_range = tnum_range(0, 1); 6959 return 0; 6960 } 6961 6962 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 6963 { 6964 struct bpf_verifier_state *state = env->cur_state; 6965 struct bpf_func_state *caller, *callee; 6966 struct bpf_reg_state *r0; 6967 int err; 6968 6969 callee = state->frame[state->curframe]; 6970 r0 = &callee->regs[BPF_REG_0]; 6971 if (r0->type == PTR_TO_STACK) { 6972 /* technically it's ok to return caller's stack pointer 6973 * (or caller's caller's pointer) back to the caller, 6974 * since these pointers are valid. Only current stack 6975 * pointer will be invalid as soon as function exits, 6976 * but let's be conservative 6977 */ 6978 verbose(env, "cannot return stack pointer to the caller\n"); 6979 return -EINVAL; 6980 } 6981 6982 state->curframe--; 6983 caller = state->frame[state->curframe]; 6984 if (callee->in_callback_fn) { 6985 /* enforce R0 return value range [0, 1]. */ 6986 struct tnum range = callee->callback_ret_range; 6987 6988 if (r0->type != SCALAR_VALUE) { 6989 verbose(env, "R0 not a scalar value\n"); 6990 return -EACCES; 6991 } 6992 if (!tnum_in(range, r0->var_off)) { 6993 verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); 6994 return -EINVAL; 6995 } 6996 } else { 6997 /* return to the caller whatever r0 had in the callee */ 6998 caller->regs[BPF_REG_0] = *r0; 6999 } 7000 7001 /* callback_fn frame should have released its own additions to parent's 7002 * reference state at this point, or check_reference_leak would 7003 * complain, hence it must be the same as the caller. There is no need 7004 * to copy it back. 
7005 */ 7006 if (!callee->in_callback_fn) { 7007 /* Transfer references to the caller */ 7008 err = copy_reference_state(caller, callee); 7009 if (err) 7010 return err; 7011 } 7012 7013 *insn_idx = callee->callsite + 1; 7014 if (env->log.level & BPF_LOG_LEVEL) { 7015 verbose(env, "returning from callee:\n"); 7016 print_verifier_state(env, callee, true); 7017 verbose(env, "to caller at %d:\n", *insn_idx); 7018 print_verifier_state(env, caller, true); 7019 } 7020 /* clear everything in the callee */ 7021 free_func_state(callee); 7022 state->frame[state->curframe + 1] = NULL; 7023 return 0; 7024 } 7025 7026 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, 7027 int func_id, 7028 struct bpf_call_arg_meta *meta) 7029 { 7030 struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; 7031 7032 if (ret_type != RET_INTEGER || 7033 (func_id != BPF_FUNC_get_stack && 7034 func_id != BPF_FUNC_get_task_stack && 7035 func_id != BPF_FUNC_probe_read_str && 7036 func_id != BPF_FUNC_probe_read_kernel_str && 7037 func_id != BPF_FUNC_probe_read_user_str)) 7038 return; 7039 7040 ret_reg->smax_value = meta->msize_max_value; 7041 ret_reg->s32_max_value = meta->msize_max_value; 7042 ret_reg->smin_value = -MAX_ERRNO; 7043 ret_reg->s32_min_value = -MAX_ERRNO; 7044 reg_bounds_sync(ret_reg); 7045 } 7046 7047 static int 7048 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 7049 int func_id, int insn_idx) 7050 { 7051 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 7052 struct bpf_map *map = meta->map_ptr; 7053 7054 if (func_id != BPF_FUNC_tail_call && 7055 func_id != BPF_FUNC_map_lookup_elem && 7056 func_id != BPF_FUNC_map_update_elem && 7057 func_id != BPF_FUNC_map_delete_elem && 7058 func_id != BPF_FUNC_map_push_elem && 7059 func_id != BPF_FUNC_map_pop_elem && 7060 func_id != BPF_FUNC_map_peek_elem && 7061 func_id != BPF_FUNC_for_each_map_elem && 7062 func_id != BPF_FUNC_redirect_map && 7063 func_id != BPF_FUNC_map_lookup_percpu_elem) 7064 return 0; 7065 7066 if (map == NULL) { 7067 verbose(env, "kernel subsystem misconfigured verifier\n"); 7068 return -EINVAL; 7069 } 7070 7071 /* In case of read-only, some additional restrictions 7072 * need to be applied in order to prevent altering the 7073 * state of the map from program side. 
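 * E.g. (sketch; 'frozen_map' is a hypothetical BPF_F_RDONLY_PROG map):
 *
 *     bpf_map_lookup_elem(&frozen_map, &key);          // still allowed
 *     bpf_map_update_elem(&frozen_map, &key, &val, 0); // rejected below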
7074 */ 7075 if ((map->map_flags & BPF_F_RDONLY_PROG) && 7076 (func_id == BPF_FUNC_map_delete_elem || 7077 func_id == BPF_FUNC_map_update_elem || 7078 func_id == BPF_FUNC_map_push_elem || 7079 func_id == BPF_FUNC_map_pop_elem)) { 7080 verbose(env, "write into map forbidden\n"); 7081 return -EACCES; 7082 } 7083 7084 if (!BPF_MAP_PTR(aux->map_ptr_state)) 7085 bpf_map_ptr_store(aux, meta->map_ptr, 7086 !meta->map_ptr->bypass_spec_v1); 7087 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) 7088 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, 7089 !meta->map_ptr->bypass_spec_v1); 7090 return 0; 7091 } 7092 7093 static int 7094 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 7095 int func_id, int insn_idx) 7096 { 7097 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 7098 struct bpf_reg_state *regs = cur_regs(env), *reg; 7099 struct bpf_map *map = meta->map_ptr; 7100 u64 val, max; 7101 int err; 7102 7103 if (func_id != BPF_FUNC_tail_call) 7104 return 0; 7105 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { 7106 verbose(env, "kernel subsystem misconfigured verifier\n"); 7107 return -EINVAL; 7108 } 7109 7110 reg = ®s[BPF_REG_3]; 7111 val = reg->var_off.value; 7112 max = map->max_entries; 7113 7114 if (!(register_is_const(reg) && val < max)) { 7115 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 7116 return 0; 7117 } 7118 7119 err = mark_chain_precision(env, BPF_REG_3); 7120 if (err) 7121 return err; 7122 if (bpf_map_key_unseen(aux)) 7123 bpf_map_key_store(aux, val); 7124 else if (!bpf_map_key_poisoned(aux) && 7125 bpf_map_key_immediate(aux) != val) 7126 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 7127 return 0; 7128 } 7129 7130 static int check_reference_leak(struct bpf_verifier_env *env) 7131 { 7132 struct bpf_func_state *state = cur_func(env); 7133 bool refs_lingering = false; 7134 int i; 7135 7136 if (state->frameno && !state->in_callback_fn) 7137 return 0; 7138 7139 for (i = 0; i < state->acquired_refs; i++) { 7140 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) 7141 continue; 7142 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", 7143 state->refs[i].id, state->refs[i].insn_idx); 7144 refs_lingering = true; 7145 } 7146 return refs_lingering ? -EINVAL : 0; 7147 } 7148 7149 static int check_bpf_snprintf_call(struct bpf_verifier_env *env, 7150 struct bpf_reg_state *regs) 7151 { 7152 struct bpf_reg_state *fmt_reg = ®s[BPF_REG_3]; 7153 struct bpf_reg_state *data_len_reg = ®s[BPF_REG_5]; 7154 struct bpf_map *fmt_map = fmt_reg->map_ptr; 7155 int err, fmt_map_off, num_args; 7156 u64 fmt_addr; 7157 char *fmt; 7158 7159 /* data must be an array of u64 */ 7160 if (data_len_reg->var_off.value % 8) 7161 return -EINVAL; 7162 num_args = data_len_reg->var_off.value / 8; 7163 7164 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const 7165 * and map_direct_value_addr is set. 7166 */ 7167 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; 7168 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, 7169 fmt_map_off); 7170 if (err) { 7171 verbose(env, "verifier bug\n"); 7172 return -EFAULT; 7173 } 7174 fmt = (char *)(long)fmt_addr + fmt_map_off; 7175 7176 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we 7177 * can focus on validating the format specifiers. 
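 * A call shape this validates (sketch, names are illustrative):
 *
 *     static const char fmt[] = "pid=%d comm=%s";    // lives in a read-only map
 *     u64 args[] = { pid, (u64)(long)comm };
 *     bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * R3 is the constant fmt pointer validated above, R5 the byte size of 'args'.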
7178 */ 7179 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args); 7180 if (err < 0) 7181 verbose(env, "Invalid format string\n"); 7182 7183 return err; 7184 } 7185 7186 static int check_get_func_ip(struct bpf_verifier_env *env) 7187 { 7188 enum bpf_prog_type type = resolve_prog_type(env->prog); 7189 int func_id = BPF_FUNC_get_func_ip; 7190 7191 if (type == BPF_PROG_TYPE_TRACING) { 7192 if (!bpf_prog_has_trampoline(env->prog)) { 7193 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", 7194 func_id_name(func_id), func_id); 7195 return -ENOTSUPP; 7196 } 7197 return 0; 7198 } else if (type == BPF_PROG_TYPE_KPROBE) { 7199 return 0; 7200 } 7201 7202 verbose(env, "func %s#%d not supported for program type %d\n", 7203 func_id_name(func_id), func_id, type); 7204 return -ENOTSUPP; 7205 } 7206 7207 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 7208 { 7209 return &env->insn_aux_data[env->insn_idx]; 7210 } 7211 7212 static bool loop_flag_is_zero(struct bpf_verifier_env *env) 7213 { 7214 struct bpf_reg_state *regs = cur_regs(env); 7215 struct bpf_reg_state *reg = ®s[BPF_REG_4]; 7216 bool reg_is_null = register_is_null(reg); 7217 7218 if (reg_is_null) 7219 mark_chain_precision(env, BPF_REG_4); 7220 7221 return reg_is_null; 7222 } 7223 7224 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno) 7225 { 7226 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; 7227 7228 if (!state->initialized) { 7229 state->initialized = 1; 7230 state->fit_for_inline = loop_flag_is_zero(env); 7231 state->callback_subprogno = subprogno; 7232 return; 7233 } 7234 7235 if (!state->fit_for_inline) 7236 return; 7237 7238 state->fit_for_inline = (loop_flag_is_zero(env) && 7239 state->callback_subprogno == subprogno); 7240 } 7241 7242 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 7243 int *insn_idx_p) 7244 { 7245 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 7246 const struct bpf_func_proto *fn = NULL; 7247 enum bpf_return_type ret_type; 7248 enum bpf_type_flag ret_flag; 7249 struct bpf_reg_state *regs; 7250 struct bpf_call_arg_meta meta; 7251 int insn_idx = *insn_idx_p; 7252 bool changes_data; 7253 int i, err, func_id; 7254 7255 /* find function prototype */ 7256 func_id = insn->imm; 7257 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 7258 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 7259 func_id); 7260 return -EINVAL; 7261 } 7262 7263 if (env->ops->get_func_proto) 7264 fn = env->ops->get_func_proto(func_id, env->prog); 7265 if (!fn) { 7266 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 7267 func_id); 7268 return -EINVAL; 7269 } 7270 7271 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 7272 if (!env->prog->gpl_compatible && fn->gpl_only) { 7273 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 7274 return -EINVAL; 7275 } 7276 7277 if (fn->allowed && !fn->allowed(env->prog)) { 7278 verbose(env, "helper call is not allowed in probe\n"); 7279 return -EINVAL; 7280 } 7281 7282 /* With LD_ABS/IND some JITs save/restore skb from r1. 
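 * Helpers for which bpf_helper_changes_pkt_data() returns true may move or
 * reallocate packet data, so previously derived packet pointers are
 * invalidated (see clear_all_pkt_pointers()), e.g. (sketch):
 *
 *     if (data + 14 > data_end) return TC_ACT_SHOT;
 *     bpf_skb_pull_data(skb, 64);   // data/data_end become unknown scalars,
 *                                   // they must be re-read and re-checked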
*/
7283 changes_data = bpf_helper_changes_pkt_data(fn->func);
7284 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
7285 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
7286 func_id_name(func_id), func_id);
7287 return -EINVAL;
7288 }
7289
7290 memset(&meta, 0, sizeof(meta));
7291 meta.pkt_access = fn->pkt_access;
7292
7293 err = check_func_proto(fn, func_id);
7294 if (err) {
7295 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
7296 func_id_name(func_id), func_id);
7297 return err;
7298 }
7299
7300 meta.func_id = func_id;
7301 /* check args */
7302 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7303 err = check_func_arg(env, i, &meta, fn);
7304 if (err)
7305 return err;
7306 }
7307
7308 err = record_func_map(env, &meta, func_id, insn_idx);
7309 if (err)
7310 return err;
7311
7312 err = record_func_key(env, &meta, func_id, insn_idx);
7313 if (err)
7314 return err;
7315
7316 /* Mark slots with STACK_MISC in case of raw mode, stack offset
7317 * is inferred from register state.
7318 */
7319 for (i = 0; i < meta.access_size; i++) {
7320 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
7321 BPF_WRITE, -1, false);
7322 if (err)
7323 return err;
7324 }
7325
7326 regs = cur_regs(env);
7327
7328 if (meta.uninit_dynptr_regno) {
7329 /* we write BPF_DW bits (8 bytes) at a time */
7330 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7331 err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
7332 i, BPF_DW, BPF_WRITE, -1, false);
7333 if (err)
7334 return err;
7335 }
7336
7337 err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
7338 fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
7339 insn_idx);
7340 if (err)
7341 return err;
7342 }
7343
7344 if (meta.release_regno) {
7345 err = -EINVAL;
7346 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
7347 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
7348 else if (meta.ref_obj_id)
7349 err = release_reference(env, meta.ref_obj_id);
7350 /* meta.ref_obj_id can only be 0 if register that is meant to be
7351 * released is NULL, which must be > R0.
7352 */
7353 else if (register_is_null(&regs[meta.release_regno]))
7354 err = 0;
7355 if (err) {
7356 verbose(env, "func %s#%d reference has not been acquired before\n",
7357 func_id_name(func_id), func_id);
7358 return err;
7359 }
7360 }
7361
7362 switch (func_id) {
7363 case BPF_FUNC_tail_call:
7364 err = check_reference_leak(env);
7365 if (err) {
7366 verbose(env, "tail_call would lead to reference leak\n");
7367 return err;
7368 }
7369 break;
7370 case BPF_FUNC_get_local_storage:
7371 /* check that flags argument in get_local_storage(map, flags) is 0,
7372 * this is required because get_local_storage() can't return an error.
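 * A non-zero flags value could only be ignored silently at run time, so
 * the verifier demands a register that is provably zero here instead.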
7373 */
7374 if (!register_is_null(&regs[BPF_REG_2])) {
7375 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
7376 return -EINVAL;
7377 }
7378 break;
7379 case BPF_FUNC_for_each_map_elem:
7380 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7381 set_map_elem_callback_state);
7382 break;
7383 case BPF_FUNC_timer_set_callback:
7384 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7385 set_timer_callback_state);
7386 break;
7387 case BPF_FUNC_find_vma:
7388 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7389 set_find_vma_callback_state);
7390 break;
7391 case BPF_FUNC_snprintf:
7392 err = check_bpf_snprintf_call(env, regs);
7393 break;
7394 case BPF_FUNC_loop:
7395 update_loop_inline_state(env, meta.subprogno);
7396 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7397 set_loop_callback_state);
7398 break;
7399 case BPF_FUNC_dynptr_from_mem:
7400 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
7401 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
7402 reg_type_str(env, regs[BPF_REG_1].type));
7403 return -EACCES;
7404 }
7405 break;
7406 case BPF_FUNC_set_retval:
7407 if (prog_type == BPF_PROG_TYPE_LSM &&
7408 env->prog->expected_attach_type == BPF_LSM_CGROUP) {
7409 if (!env->prog->aux->attach_func_proto->type) {
7410 /* Make sure programs that attach to void
7411 * hooks don't try to modify return value.
7412 */
7413 verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
7414 return -EINVAL;
7415 }
7416 }
7417 break;
7418 case BPF_FUNC_dynptr_data:
7419 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7420 if (arg_type_is_dynptr(fn->arg_type[i])) {
7421 struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
7422
7423 if (meta.ref_obj_id) {
7424 verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
7425 return -EFAULT;
7426 }
7427
7428 if (base_type(reg->type) != PTR_TO_DYNPTR)
7429 /* Find the id of the dynptr we're
7430 * tracking the reference of
7431 */
7432 meta.ref_obj_id = stack_slot_get_id(env, reg);
7433 break;
7434 }
7435 }
7436 if (i == MAX_BPF_FUNC_REG_ARGS) {
7437 verbose(env, "verifier internal error: no dynptr in bpf_dynptr_data()\n");
7438 return -EFAULT;
7439 }
7440 break;
7441 case BPF_FUNC_user_ringbuf_drain:
7442 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7443 set_user_ringbuf_callback_state);
7444 break;
7445 }
7446
7447 if (err)
7448 return err;
7449
7450 /* reset caller saved regs */
7451 for (i = 0; i < CALLER_SAVED_REGS; i++) {
7452 mark_reg_not_init(env, regs, caller_saved[i]);
7453 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7454 }
7455
7456 /* helper call returns 64-bit value.
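 * Marking subreg_def as DEF_NOT_SUBREG below records that all 64 bits of
 * r0 are (re)defined by the call, so the zero-extension pass does not
 * need to patch in an explicit zext for later 32-bit uses.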
*/ 7457 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 7458 7459 /* update return register (already marked as written above) */ 7460 ret_type = fn->ret_type; 7461 ret_flag = type_flag(ret_type); 7462 7463 switch (base_type(ret_type)) { 7464 case RET_INTEGER: 7465 /* sets type to SCALAR_VALUE */ 7466 mark_reg_unknown(env, regs, BPF_REG_0); 7467 break; 7468 case RET_VOID: 7469 regs[BPF_REG_0].type = NOT_INIT; 7470 break; 7471 case RET_PTR_TO_MAP_VALUE: 7472 /* There is no offset yet applied, variable or fixed */ 7473 mark_reg_known_zero(env, regs, BPF_REG_0); 7474 /* remember map_ptr, so that check_map_access() 7475 * can check 'value_size' boundary of memory access 7476 * to map element returned from bpf_map_lookup_elem() 7477 */ 7478 if (meta.map_ptr == NULL) { 7479 verbose(env, 7480 "kernel subsystem misconfigured verifier\n"); 7481 return -EINVAL; 7482 } 7483 regs[BPF_REG_0].map_ptr = meta.map_ptr; 7484 regs[BPF_REG_0].map_uid = meta.map_uid; 7485 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; 7486 if (!type_may_be_null(ret_type) && 7487 map_value_has_spin_lock(meta.map_ptr)) { 7488 regs[BPF_REG_0].id = ++env->id_gen; 7489 } 7490 break; 7491 case RET_PTR_TO_SOCKET: 7492 mark_reg_known_zero(env, regs, BPF_REG_0); 7493 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; 7494 break; 7495 case RET_PTR_TO_SOCK_COMMON: 7496 mark_reg_known_zero(env, regs, BPF_REG_0); 7497 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; 7498 break; 7499 case RET_PTR_TO_TCP_SOCK: 7500 mark_reg_known_zero(env, regs, BPF_REG_0); 7501 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; 7502 break; 7503 case RET_PTR_TO_ALLOC_MEM: 7504 mark_reg_known_zero(env, regs, BPF_REG_0); 7505 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 7506 regs[BPF_REG_0].mem_size = meta.mem_size; 7507 break; 7508 case RET_PTR_TO_MEM_OR_BTF_ID: 7509 { 7510 const struct btf_type *t; 7511 7512 mark_reg_known_zero(env, regs, BPF_REG_0); 7513 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); 7514 if (!btf_type_is_struct(t)) { 7515 u32 tsize; 7516 const struct btf_type *ret; 7517 const char *tname; 7518 7519 /* resolve the type size of ksym. */ 7520 ret = btf_resolve_size(meta.ret_btf, t, &tsize); 7521 if (IS_ERR(ret)) { 7522 tname = btf_name_by_offset(meta.ret_btf, t->name_off); 7523 verbose(env, "unable to resolve the size of type '%s': %ld\n", 7524 tname, PTR_ERR(ret)); 7525 return -EINVAL; 7526 } 7527 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 7528 regs[BPF_REG_0].mem_size = tsize; 7529 } else { 7530 /* MEM_RDONLY may be carried from ret_flag, but it 7531 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise 7532 * it will confuse the check of PTR_TO_BTF_ID in 7533 * check_mem_access(). 
7534 */ 7535 ret_flag &= ~MEM_RDONLY; 7536 7537 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 7538 regs[BPF_REG_0].btf = meta.ret_btf; 7539 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 7540 } 7541 break; 7542 } 7543 case RET_PTR_TO_BTF_ID: 7544 { 7545 struct btf *ret_btf; 7546 int ret_btf_id; 7547 7548 mark_reg_known_zero(env, regs, BPF_REG_0); 7549 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 7550 if (func_id == BPF_FUNC_kptr_xchg) { 7551 ret_btf = meta.kptr_off_desc->kptr.btf; 7552 ret_btf_id = meta.kptr_off_desc->kptr.btf_id; 7553 } else { 7554 if (fn->ret_btf_id == BPF_PTR_POISON) { 7555 verbose(env, "verifier internal error:"); 7556 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", 7557 func_id_name(func_id)); 7558 return -EINVAL; 7559 } 7560 ret_btf = btf_vmlinux; 7561 ret_btf_id = *fn->ret_btf_id; 7562 } 7563 if (ret_btf_id == 0) { 7564 verbose(env, "invalid return type %u of func %s#%d\n", 7565 base_type(ret_type), func_id_name(func_id), 7566 func_id); 7567 return -EINVAL; 7568 } 7569 regs[BPF_REG_0].btf = ret_btf; 7570 regs[BPF_REG_0].btf_id = ret_btf_id; 7571 break; 7572 } 7573 default: 7574 verbose(env, "unknown return type %u of func %s#%d\n", 7575 base_type(ret_type), func_id_name(func_id), func_id); 7576 return -EINVAL; 7577 } 7578 7579 if (type_may_be_null(regs[BPF_REG_0].type)) 7580 regs[BPF_REG_0].id = ++env->id_gen; 7581 7582 if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) { 7583 verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n", 7584 func_id_name(func_id), func_id); 7585 return -EFAULT; 7586 } 7587 7588 if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) { 7589 /* For release_reference() */ 7590 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 7591 } else if (is_acquire_function(func_id, meta.map_ptr)) { 7592 int id = acquire_reference_state(env, insn_idx); 7593 7594 if (id < 0) 7595 return id; 7596 /* For mark_ptr_or_null_reg() */ 7597 regs[BPF_REG_0].id = id; 7598 /* For release_reference() */ 7599 regs[BPF_REG_0].ref_obj_id = id; 7600 } 7601 7602 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 7603 7604 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 7605 if (err) 7606 return err; 7607 7608 if ((func_id == BPF_FUNC_get_stack || 7609 func_id == BPF_FUNC_get_task_stack) && 7610 !env->prog->has_callchain_buf) { 7611 const char *err_str; 7612 7613 #ifdef CONFIG_PERF_EVENTS 7614 err = get_callchain_buffers(sysctl_perf_event_max_stack); 7615 err_str = "cannot get callchain buffer for func %s#%d\n"; 7616 #else 7617 err = -ENOTSUPP; 7618 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 7619 #endif 7620 if (err) { 7621 verbose(env, err_str, func_id_name(func_id), func_id); 7622 return err; 7623 } 7624 7625 env->prog->has_callchain_buf = true; 7626 } 7627 7628 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 7629 env->prog->call_get_stack = true; 7630 7631 if (func_id == BPF_FUNC_get_func_ip) { 7632 if (check_get_func_ip(env)) 7633 return -ENOTSUPP; 7634 env->prog->call_get_func_ip = true; 7635 } 7636 7637 if (changes_data) 7638 clear_all_pkt_pointers(env); 7639 return 0; 7640 } 7641 7642 /* mark_btf_func_reg_size() is used when the reg size is determined by 7643 * the BTF func_proto's return value size and argument. 
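 * For the return value this decides whether later 32-bit uses need a
 * zero-extension patched in; for arguments it tells the liveness/zext
 * analysis whether the kfunc consumes the full 64 bits or only the low 32.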
7644 */ 7645 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, 7646 size_t reg_size) 7647 { 7648 struct bpf_reg_state *reg = &cur_regs(env)[regno]; 7649 7650 if (regno == BPF_REG_0) { 7651 /* Function return value */ 7652 reg->live |= REG_LIVE_WRITTEN; 7653 reg->subreg_def = reg_size == sizeof(u64) ? 7654 DEF_NOT_SUBREG : env->insn_idx + 1; 7655 } else { 7656 /* Function argument */ 7657 if (reg_size == sizeof(u64)) { 7658 mark_insn_zext(env, reg); 7659 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 7660 } else { 7661 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); 7662 } 7663 } 7664 } 7665 7666 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 7667 int *insn_idx_p) 7668 { 7669 const struct btf_type *t, *func, *func_proto, *ptr_type; 7670 struct bpf_reg_state *regs = cur_regs(env); 7671 struct bpf_kfunc_arg_meta meta = { 0 }; 7672 const char *func_name, *ptr_type_name; 7673 u32 i, nargs, func_id, ptr_type_id; 7674 int err, insn_idx = *insn_idx_p; 7675 const struct btf_param *args; 7676 struct btf *desc_btf; 7677 u32 *kfunc_flags; 7678 bool acq; 7679 7680 /* skip for now, but return error when we find this in fixup_kfunc_call */ 7681 if (!insn->imm) 7682 return 0; 7683 7684 desc_btf = find_kfunc_desc_btf(env, insn->off); 7685 if (IS_ERR(desc_btf)) 7686 return PTR_ERR(desc_btf); 7687 7688 func_id = insn->imm; 7689 func = btf_type_by_id(desc_btf, func_id); 7690 func_name = btf_name_by_offset(desc_btf, func->name_off); 7691 func_proto = btf_type_by_id(desc_btf, func->type); 7692 7693 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id); 7694 if (!kfunc_flags) { 7695 verbose(env, "calling kernel function %s is not allowed\n", 7696 func_name); 7697 return -EACCES; 7698 } 7699 if (*kfunc_flags & KF_DESTRUCTIVE && !capable(CAP_SYS_BOOT)) { 7700 verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capabilities\n"); 7701 return -EACCES; 7702 } 7703 7704 acq = *kfunc_flags & KF_ACQUIRE; 7705 7706 meta.flags = *kfunc_flags; 7707 7708 /* Check the arguments */ 7709 err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, &meta); 7710 if (err < 0) 7711 return err; 7712 /* In case of release function, we get register number of refcounted 7713 * PTR_TO_BTF_ID back from btf_check_kfunc_arg_match, do the release now 7714 */ 7715 if (err) { 7716 err = release_reference(env, regs[err].ref_obj_id); 7717 if (err) { 7718 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 7719 func_name, func_id); 7720 return err; 7721 } 7722 } 7723 7724 for (i = 0; i < CALLER_SAVED_REGS; i++) 7725 mark_reg_not_init(env, regs, caller_saved[i]); 7726 7727 /* Check return type */ 7728 t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL); 7729 7730 if (acq && !btf_type_is_struct_ptr(desc_btf, t)) { 7731 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); 7732 return -EINVAL; 7733 } 7734 7735 if (btf_type_is_scalar(t)) { 7736 mark_reg_unknown(env, regs, BPF_REG_0); 7737 mark_btf_func_reg_size(env, BPF_REG_0, t->size); 7738 } else if (btf_type_is_ptr(t)) { 7739 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, 7740 &ptr_type_id); 7741 if (!btf_type_is_struct(ptr_type)) { 7742 if (!meta.r0_size) { 7743 ptr_type_name = btf_name_by_offset(desc_btf, 7744 ptr_type->name_off); 7745 verbose(env, 7746 "kernel function %s returns pointer type %s %s is not supported\n", 7747 func_name, 7748 btf_type_str(ptr_type), 7749 ptr_type_name); 7750 return -EINVAL; 7751 } 7752 7753 
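/* Non-struct pointer return with a known size (meta.r0_size): model r0 as
 * PTR_TO_MEM of that size below, read-only if requested, and keep the
 * ref_obj_id so the memory cannot be touched after it has been released.
 */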
mark_reg_known_zero(env, regs, BPF_REG_0); 7754 regs[BPF_REG_0].type = PTR_TO_MEM; 7755 regs[BPF_REG_0].mem_size = meta.r0_size; 7756 7757 if (meta.r0_rdonly) 7758 regs[BPF_REG_0].type |= MEM_RDONLY; 7759 7760 /* Ensures we don't access the memory after a release_reference() */ 7761 if (meta.ref_obj_id) 7762 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 7763 } else { 7764 mark_reg_known_zero(env, regs, BPF_REG_0); 7765 regs[BPF_REG_0].btf = desc_btf; 7766 regs[BPF_REG_0].type = PTR_TO_BTF_ID; 7767 regs[BPF_REG_0].btf_id = ptr_type_id; 7768 } 7769 if (*kfunc_flags & KF_RET_NULL) { 7770 regs[BPF_REG_0].type |= PTR_MAYBE_NULL; 7771 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */ 7772 regs[BPF_REG_0].id = ++env->id_gen; 7773 } 7774 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); 7775 if (acq) { 7776 int id = acquire_reference_state(env, insn_idx); 7777 7778 if (id < 0) 7779 return id; 7780 regs[BPF_REG_0].id = id; 7781 regs[BPF_REG_0].ref_obj_id = id; 7782 } 7783 } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */ 7784 7785 nargs = btf_type_vlen(func_proto); 7786 args = (const struct btf_param *)(func_proto + 1); 7787 for (i = 0; i < nargs; i++) { 7788 u32 regno = i + 1; 7789 7790 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); 7791 if (btf_type_is_ptr(t)) 7792 mark_btf_func_reg_size(env, regno, sizeof(void *)); 7793 else 7794 /* scalar. ensured by btf_check_kfunc_arg_match() */ 7795 mark_btf_func_reg_size(env, regno, t->size); 7796 } 7797 7798 return 0; 7799 } 7800 7801 static bool signed_add_overflows(s64 a, s64 b) 7802 { 7803 /* Do the add in u64, where overflow is well-defined */ 7804 s64 res = (s64)((u64)a + (u64)b); 7805 7806 if (b < 0) 7807 return res > a; 7808 return res < a; 7809 } 7810 7811 static bool signed_add32_overflows(s32 a, s32 b) 7812 { 7813 /* Do the add in u32, where overflow is well-defined */ 7814 s32 res = (s32)((u32)a + (u32)b); 7815 7816 if (b < 0) 7817 return res > a; 7818 return res < a; 7819 } 7820 7821 static bool signed_sub_overflows(s64 a, s64 b) 7822 { 7823 /* Do the sub in u64, where overflow is well-defined */ 7824 s64 res = (s64)((u64)a - (u64)b); 7825 7826 if (b < 0) 7827 return res < a; 7828 return res > a; 7829 } 7830 7831 static bool signed_sub32_overflows(s32 a, s32 b) 7832 { 7833 /* Do the sub in u32, where overflow is well-defined */ 7834 s32 res = (s32)((u32)a - (u32)b); 7835 7836 if (b < 0) 7837 return res < a; 7838 return res > a; 7839 } 7840 7841 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 7842 const struct bpf_reg_state *reg, 7843 enum bpf_reg_type type) 7844 { 7845 bool known = tnum_is_const(reg->var_off); 7846 s64 val = reg->var_off.value; 7847 s64 smin = reg->smin_value; 7848 7849 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 7850 verbose(env, "math between %s pointer and %lld is not allowed\n", 7851 reg_type_str(env, type), val); 7852 return false; 7853 } 7854 7855 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 7856 verbose(env, "%s pointer offset %d is not allowed\n", 7857 reg_type_str(env, type), reg->off); 7858 return false; 7859 } 7860 7861 if (smin == S64_MIN) { 7862 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 7863 reg_type_str(env, type)); 7864 return false; 7865 } 7866 7867 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 7868 verbose(env, "value %lld makes %s pointer be out of bounds\n", 7869 smin, reg_type_str(env, type)); 7870 return false; 7871 } 7872 7873 return true; 7874 
} 7875 7876 enum { 7877 REASON_BOUNDS = -1, 7878 REASON_TYPE = -2, 7879 REASON_PATHS = -3, 7880 REASON_LIMIT = -4, 7881 REASON_STACK = -5, 7882 }; 7883 7884 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 7885 u32 *alu_limit, bool mask_to_left) 7886 { 7887 u32 max = 0, ptr_limit = 0; 7888 7889 switch (ptr_reg->type) { 7890 case PTR_TO_STACK: 7891 /* Offset 0 is out-of-bounds, but acceptable start for the 7892 * left direction, see BPF_REG_FP. Also, unknown scalar 7893 * offset where we would need to deal with min/max bounds is 7894 * currently prohibited for unprivileged. 7895 */ 7896 max = MAX_BPF_STACK + mask_to_left; 7897 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); 7898 break; 7899 case PTR_TO_MAP_VALUE: 7900 max = ptr_reg->map_ptr->value_size; 7901 ptr_limit = (mask_to_left ? 7902 ptr_reg->smin_value : 7903 ptr_reg->umax_value) + ptr_reg->off; 7904 break; 7905 default: 7906 return REASON_TYPE; 7907 } 7908 7909 if (ptr_limit >= max) 7910 return REASON_LIMIT; 7911 *alu_limit = ptr_limit; 7912 return 0; 7913 } 7914 7915 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 7916 const struct bpf_insn *insn) 7917 { 7918 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 7919 } 7920 7921 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 7922 u32 alu_state, u32 alu_limit) 7923 { 7924 /* If we arrived here from different branches with different 7925 * state or limits to sanitize, then this won't work. 7926 */ 7927 if (aux->alu_state && 7928 (aux->alu_state != alu_state || 7929 aux->alu_limit != alu_limit)) 7930 return REASON_PATHS; 7931 7932 /* Corresponding fixup done in do_misc_fixups(). */ 7933 aux->alu_state = alu_state; 7934 aux->alu_limit = alu_limit; 7935 return 0; 7936 } 7937 7938 static int sanitize_val_alu(struct bpf_verifier_env *env, 7939 struct bpf_insn *insn) 7940 { 7941 struct bpf_insn_aux_data *aux = cur_aux(env); 7942 7943 if (can_skip_alu_sanitation(env, insn)) 7944 return 0; 7945 7946 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 7947 } 7948 7949 static bool sanitize_needed(u8 opcode) 7950 { 7951 return opcode == BPF_ADD || opcode == BPF_SUB; 7952 } 7953 7954 struct bpf_sanitize_info { 7955 struct bpf_insn_aux_data aux; 7956 bool mask_to_left; 7957 }; 7958 7959 static struct bpf_verifier_state * 7960 sanitize_speculative_path(struct bpf_verifier_env *env, 7961 const struct bpf_insn *insn, 7962 u32 next_idx, u32 curr_idx) 7963 { 7964 struct bpf_verifier_state *branch; 7965 struct bpf_reg_state *regs; 7966 7967 branch = push_stack(env, next_idx, curr_idx, true); 7968 if (branch && insn) { 7969 regs = branch->frame[branch->curframe]->regs; 7970 if (BPF_SRC(insn->code) == BPF_K) { 7971 mark_reg_unknown(env, regs, insn->dst_reg); 7972 } else if (BPF_SRC(insn->code) == BPF_X) { 7973 mark_reg_unknown(env, regs, insn->dst_reg); 7974 mark_reg_unknown(env, regs, insn->src_reg); 7975 } 7976 } 7977 return branch; 7978 } 7979 7980 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 7981 struct bpf_insn *insn, 7982 const struct bpf_reg_state *ptr_reg, 7983 const struct bpf_reg_state *off_reg, 7984 struct bpf_reg_state *dst_reg, 7985 struct bpf_sanitize_info *info, 7986 const bool commit_window) 7987 { 7988 struct bpf_insn_aux_data *aux = commit_window ? 
cur_aux(env) : &info->aux; 7989 struct bpf_verifier_state *vstate = env->cur_state; 7990 bool off_is_imm = tnum_is_const(off_reg->var_off); 7991 bool off_is_neg = off_reg->smin_value < 0; 7992 bool ptr_is_dst_reg = ptr_reg == dst_reg; 7993 u8 opcode = BPF_OP(insn->code); 7994 u32 alu_state, alu_limit; 7995 struct bpf_reg_state tmp; 7996 bool ret; 7997 int err; 7998 7999 if (can_skip_alu_sanitation(env, insn)) 8000 return 0; 8001 8002 /* We already marked aux for masking from non-speculative 8003 * paths, thus we got here in the first place. We only care 8004 * to explore bad access from here. 8005 */ 8006 if (vstate->speculative) 8007 goto do_sim; 8008 8009 if (!commit_window) { 8010 if (!tnum_is_const(off_reg->var_off) && 8011 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) 8012 return REASON_BOUNDS; 8013 8014 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || 8015 (opcode == BPF_SUB && !off_is_neg); 8016 } 8017 8018 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); 8019 if (err < 0) 8020 return err; 8021 8022 if (commit_window) { 8023 /* In commit phase we narrow the masking window based on 8024 * the observed pointer move after the simulated operation. 8025 */ 8026 alu_state = info->aux.alu_state; 8027 alu_limit = abs(info->aux.alu_limit - alu_limit); 8028 } else { 8029 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 8030 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; 8031 alu_state |= ptr_is_dst_reg ? 8032 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 8033 8034 /* Limit pruning on unknown scalars to enable deep search for 8035 * potential masking differences from other program paths. 8036 */ 8037 if (!off_is_imm) 8038 env->explore_alu_limits = true; 8039 } 8040 8041 err = update_alu_sanitation_state(aux, alu_state, alu_limit); 8042 if (err < 0) 8043 return err; 8044 do_sim: 8045 /* If we're in commit phase, we're done here given we already 8046 * pushed the truncated dst_reg into the speculative verification 8047 * stack. 8048 * 8049 * Also, when register is a known constant, we rewrite register-based 8050 * operation to immediate-based, and thus do not need masking (and as 8051 * a consequence, do not need to simulate the zero-truncation either). 8052 */ 8053 if (commit_window || off_is_imm) 8054 return 0; 8055 8056 /* Simulate and find potential out-of-bounds access under 8057 * speculative execution from truncation as a result of 8058 * masking when off was not within expected range. If off 8059 * sits in dst, then we temporarily need to move ptr there 8060 * to simulate dst (== 0) +/-= ptr. Needed, for example, 8061 * for cases where we use K-based arithmetic in one direction 8062 * and truncated reg-based in the other in order to explore 8063 * bad access. 8064 */ 8065 if (!ptr_is_dst_reg) { 8066 tmp = *dst_reg; 8067 *dst_reg = *ptr_reg; 8068 } 8069 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, 8070 env->insn_idx); 8071 if (!ptr_is_dst_reg && ret) 8072 *dst_reg = tmp; 8073 return !ret ? REASON_STACK : 0; 8074 } 8075 8076 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) 8077 { 8078 struct bpf_verifier_state *vstate = env->cur_state; 8079 8080 /* If we simulate paths under speculation, we don't update the 8081 * insn as 'seen' such that when we verify unreachable paths in 8082 * the non-speculative domain, sanitize_dead_code() can still 8083 * rewrite/sanitize them. 
8084 */ 8085 if (!vstate->speculative) 8086 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 8087 } 8088 8089 static int sanitize_err(struct bpf_verifier_env *env, 8090 const struct bpf_insn *insn, int reason, 8091 const struct bpf_reg_state *off_reg, 8092 const struct bpf_reg_state *dst_reg) 8093 { 8094 static const char *err = "pointer arithmetic with it prohibited for !root"; 8095 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; 8096 u32 dst = insn->dst_reg, src = insn->src_reg; 8097 8098 switch (reason) { 8099 case REASON_BOUNDS: 8100 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", 8101 off_reg == dst_reg ? dst : src, err); 8102 break; 8103 case REASON_TYPE: 8104 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", 8105 off_reg == dst_reg ? src : dst, err); 8106 break; 8107 case REASON_PATHS: 8108 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", 8109 dst, op, err); 8110 break; 8111 case REASON_LIMIT: 8112 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", 8113 dst, op, err); 8114 break; 8115 case REASON_STACK: 8116 verbose(env, "R%d could not be pushed for speculative verification, %s\n", 8117 dst, err); 8118 break; 8119 default: 8120 verbose(env, "verifier internal error: unknown reason (%d)\n", 8121 reason); 8122 break; 8123 } 8124 8125 return -EACCES; 8126 } 8127 8128 /* check that stack access falls within stack limits and that 'reg' doesn't 8129 * have a variable offset. 8130 * 8131 * Variable offset is prohibited for unprivileged mode for simplicity since it 8132 * requires corresponding support in Spectre masking for stack ALU. See also 8133 * retrieve_ptr_limit(). 8134 * 8135 * 8136 * 'off' includes 'reg->off'. 8137 */ 8138 static int check_stack_access_for_ptr_arithmetic( 8139 struct bpf_verifier_env *env, 8140 int regno, 8141 const struct bpf_reg_state *reg, 8142 int off) 8143 { 8144 if (!tnum_is_const(reg->var_off)) { 8145 char tn_buf[48]; 8146 8147 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 8148 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", 8149 regno, tn_buf, off); 8150 return -EACCES; 8151 } 8152 8153 if (off >= 0 || off < -MAX_BPF_STACK) { 8154 verbose(env, "R%d stack pointer arithmetic goes out of range, " 8155 "prohibited for !root; off=%d\n", regno, off); 8156 return -EACCES; 8157 } 8158 8159 return 0; 8160 } 8161 8162 static int sanitize_check_bounds(struct bpf_verifier_env *env, 8163 const struct bpf_insn *insn, 8164 const struct bpf_reg_state *dst_reg) 8165 { 8166 u32 dst = insn->dst_reg; 8167 8168 /* For unprivileged we require that resulting offset must be in bounds 8169 * in order to be able to sanitize access later on. 8170 */ 8171 if (env->bypass_spec_v1) 8172 return 0; 8173 8174 switch (dst_reg->type) { 8175 case PTR_TO_STACK: 8176 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, 8177 dst_reg->off + dst_reg->var_off.value)) 8178 return -EACCES; 8179 break; 8180 case PTR_TO_MAP_VALUE: 8181 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { 8182 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 8183 "prohibited for !root\n", dst); 8184 return -EACCES; 8185 } 8186 break; 8187 default: 8188 break; 8189 } 8190 8191 return 0; 8192 } 8193 8194 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 8195 * Caller should also handle BPF_MOV case separately. 8196 * If we return -EACCES, caller may want to try again treating pointer as a 8197 * scalar. 
So we only emit a diagnostic if !env->allow_ptr_leaks.
8198 */
8199 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
8200 struct bpf_insn *insn,
8201 const struct bpf_reg_state *ptr_reg,
8202 const struct bpf_reg_state *off_reg)
8203 {
8204 struct bpf_verifier_state *vstate = env->cur_state;
8205 struct bpf_func_state *state = vstate->frame[vstate->curframe];
8206 struct bpf_reg_state *regs = state->regs, *dst_reg;
8207 bool known = tnum_is_const(off_reg->var_off);
8208 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
8209 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
8210 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
8211 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
8212 struct bpf_sanitize_info info = {};
8213 u8 opcode = BPF_OP(insn->code);
8214 u32 dst = insn->dst_reg;
8215 int ret;
8216
8217 dst_reg = &regs[dst];
8218
8219 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
8220 smin_val > smax_val || umin_val > umax_val) {
8221 /* Taint dst register if offset had invalid bounds derived from
8222 * e.g. dead branches.
8223 */
8224 __mark_reg_unknown(env, dst_reg);
8225 return 0;
8226 }
8227
8228 if (BPF_CLASS(insn->code) != BPF_ALU64) {
8229 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
8230 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
8231 __mark_reg_unknown(env, dst_reg);
8232 return 0;
8233 }
8234
8235 verbose(env,
8236 "R%d 32-bit pointer arithmetic prohibited\n",
8237 dst);
8238 return -EACCES;
8239 }
8240
8241 if (ptr_reg->type & PTR_MAYBE_NULL) {
8242 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
8243 dst, reg_type_str(env, ptr_reg->type));
8244 return -EACCES;
8245 }
8246
8247 switch (base_type(ptr_reg->type)) {
8248 case CONST_PTR_TO_MAP:
8249 /* smin_val represents the known value */
8250 if (known && smin_val == 0 && opcode == BPF_ADD)
8251 break;
8252 fallthrough;
8253 case PTR_TO_PACKET_END:
8254 case PTR_TO_SOCKET:
8255 case PTR_TO_SOCK_COMMON:
8256 case PTR_TO_TCP_SOCK:
8257 case PTR_TO_XDP_SOCK:
8258 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
8259 dst, reg_type_str(env, ptr_reg->type));
8260 return -EACCES;
8261 default:
8262 break;
8263 }
8264
8265 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
8266 * The id may be overwritten later if we create a new variable offset.
8267 */
8268 dst_reg->type = ptr_reg->type;
8269 dst_reg->id = ptr_reg->id;
8270
8271 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
8272 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
8273 return -EINVAL;
8274
8275 /* pointer types do not carry 32-bit bounds at the moment. */
8276 __mark_reg32_unbounded(dst_reg);
8277
8278 if (sanitize_needed(opcode)) {
8279 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
8280 &info, false);
8281 if (ret < 0)
8282 return sanitize_err(env, insn, ret, off_reg, dst_reg);
8283 }
8284
8285 switch (opcode) {
8286 case BPF_ADD:
8287 /* We can take a fixed offset as long as it doesn't overflow
8288 * the s32 'off' field
8289 */
8290 if (known && (ptr_reg->off + smin_val ==
8291 (s64)(s32)(ptr_reg->off + smin_val))) {
8292 /* pointer += K.
Accumulate it into fixed offset */ 8293 dst_reg->smin_value = smin_ptr; 8294 dst_reg->smax_value = smax_ptr; 8295 dst_reg->umin_value = umin_ptr; 8296 dst_reg->umax_value = umax_ptr; 8297 dst_reg->var_off = ptr_reg->var_off; 8298 dst_reg->off = ptr_reg->off + smin_val; 8299 dst_reg->raw = ptr_reg->raw; 8300 break; 8301 } 8302 /* A new variable offset is created. Note that off_reg->off 8303 * == 0, since it's a scalar. 8304 * dst_reg gets the pointer type and since some positive 8305 * integer value was added to the pointer, give it a new 'id' 8306 * if it's a PTR_TO_PACKET. 8307 * this creates a new 'base' pointer, off_reg (variable) gets 8308 * added into the variable offset, and we copy the fixed offset 8309 * from ptr_reg. 8310 */ 8311 if (signed_add_overflows(smin_ptr, smin_val) || 8312 signed_add_overflows(smax_ptr, smax_val)) { 8313 dst_reg->smin_value = S64_MIN; 8314 dst_reg->smax_value = S64_MAX; 8315 } else { 8316 dst_reg->smin_value = smin_ptr + smin_val; 8317 dst_reg->smax_value = smax_ptr + smax_val; 8318 } 8319 if (umin_ptr + umin_val < umin_ptr || 8320 umax_ptr + umax_val < umax_ptr) { 8321 dst_reg->umin_value = 0; 8322 dst_reg->umax_value = U64_MAX; 8323 } else { 8324 dst_reg->umin_value = umin_ptr + umin_val; 8325 dst_reg->umax_value = umax_ptr + umax_val; 8326 } 8327 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 8328 dst_reg->off = ptr_reg->off; 8329 dst_reg->raw = ptr_reg->raw; 8330 if (reg_is_pkt_pointer(ptr_reg)) { 8331 dst_reg->id = ++env->id_gen; 8332 /* something was added to pkt_ptr, set range to zero */ 8333 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 8334 } 8335 break; 8336 case BPF_SUB: 8337 if (dst_reg == off_reg) { 8338 /* scalar -= pointer. Creates an unknown scalar */ 8339 verbose(env, "R%d tried to subtract pointer from scalar\n", 8340 dst); 8341 return -EACCES; 8342 } 8343 /* We don't allow subtraction from FP, because (according to 8344 * test_verifier.c test "invalid fp arithmetic", JITs might not 8345 * be able to deal with it. 8346 */ 8347 if (ptr_reg->type == PTR_TO_STACK) { 8348 verbose(env, "R%d subtraction from stack pointer prohibited\n", 8349 dst); 8350 return -EACCES; 8351 } 8352 if (known && (ptr_reg->off - smin_val == 8353 (s64)(s32)(ptr_reg->off - smin_val))) { 8354 /* pointer -= K. Subtract it from fixed offset */ 8355 dst_reg->smin_value = smin_ptr; 8356 dst_reg->smax_value = smax_ptr; 8357 dst_reg->umin_value = umin_ptr; 8358 dst_reg->umax_value = umax_ptr; 8359 dst_reg->var_off = ptr_reg->var_off; 8360 dst_reg->id = ptr_reg->id; 8361 dst_reg->off = ptr_reg->off - smin_val; 8362 dst_reg->raw = ptr_reg->raw; 8363 break; 8364 } 8365 /* A new variable offset is created. If the subtrahend is known 8366 * nonnegative, then any reg->range we had before is still good. 
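 * (which is why the packet range below is only cleared when smin_val < 0).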
8367 */ 8368 if (signed_sub_overflows(smin_ptr, smax_val) || 8369 signed_sub_overflows(smax_ptr, smin_val)) { 8370 /* Overflow possible, we know nothing */ 8371 dst_reg->smin_value = S64_MIN; 8372 dst_reg->smax_value = S64_MAX; 8373 } else { 8374 dst_reg->smin_value = smin_ptr - smax_val; 8375 dst_reg->smax_value = smax_ptr - smin_val; 8376 } 8377 if (umin_ptr < umax_val) { 8378 /* Overflow possible, we know nothing */ 8379 dst_reg->umin_value = 0; 8380 dst_reg->umax_value = U64_MAX; 8381 } else { 8382 /* Cannot overflow (as long as bounds are consistent) */ 8383 dst_reg->umin_value = umin_ptr - umax_val; 8384 dst_reg->umax_value = umax_ptr - umin_val; 8385 } 8386 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 8387 dst_reg->off = ptr_reg->off; 8388 dst_reg->raw = ptr_reg->raw; 8389 if (reg_is_pkt_pointer(ptr_reg)) { 8390 dst_reg->id = ++env->id_gen; 8391 /* something was added to pkt_ptr, set range to zero */ 8392 if (smin_val < 0) 8393 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 8394 } 8395 break; 8396 case BPF_AND: 8397 case BPF_OR: 8398 case BPF_XOR: 8399 /* bitwise ops on pointers are troublesome, prohibit. */ 8400 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 8401 dst, bpf_alu_string[opcode >> 4]); 8402 return -EACCES; 8403 default: 8404 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 8405 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 8406 dst, bpf_alu_string[opcode >> 4]); 8407 return -EACCES; 8408 } 8409 8410 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 8411 return -EINVAL; 8412 reg_bounds_sync(dst_reg); 8413 if (sanitize_check_bounds(env, insn, dst_reg) < 0) 8414 return -EACCES; 8415 if (sanitize_needed(opcode)) { 8416 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, 8417 &info, true); 8418 if (ret < 0) 8419 return sanitize_err(env, insn, ret, off_reg, dst_reg); 8420 } 8421 8422 return 0; 8423 } 8424 8425 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 8426 struct bpf_reg_state *src_reg) 8427 { 8428 s32 smin_val = src_reg->s32_min_value; 8429 s32 smax_val = src_reg->s32_max_value; 8430 u32 umin_val = src_reg->u32_min_value; 8431 u32 umax_val = src_reg->u32_max_value; 8432 8433 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 8434 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 8435 dst_reg->s32_min_value = S32_MIN; 8436 dst_reg->s32_max_value = S32_MAX; 8437 } else { 8438 dst_reg->s32_min_value += smin_val; 8439 dst_reg->s32_max_value += smax_val; 8440 } 8441 if (dst_reg->u32_min_value + umin_val < umin_val || 8442 dst_reg->u32_max_value + umax_val < umax_val) { 8443 dst_reg->u32_min_value = 0; 8444 dst_reg->u32_max_value = U32_MAX; 8445 } else { 8446 dst_reg->u32_min_value += umin_val; 8447 dst_reg->u32_max_value += umax_val; 8448 } 8449 } 8450 8451 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 8452 struct bpf_reg_state *src_reg) 8453 { 8454 s64 smin_val = src_reg->smin_value; 8455 s64 smax_val = src_reg->smax_value; 8456 u64 umin_val = src_reg->umin_value; 8457 u64 umax_val = src_reg->umax_value; 8458 8459 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 8460 signed_add_overflows(dst_reg->smax_value, smax_val)) { 8461 dst_reg->smin_value = S64_MIN; 8462 dst_reg->smax_value = S64_MAX; 8463 } else { 8464 dst_reg->smin_value += smin_val; 8465 dst_reg->smax_value += smax_val; 8466 } 8467 if (dst_reg->umin_value + umin_val < umin_val || 8468 dst_reg->umax_value + umax_val < umax_val) { 8469 dst_reg->umin_value = 0; 
8470 dst_reg->umax_value = U64_MAX; 8471 } else { 8472 dst_reg->umin_value += umin_val; 8473 dst_reg->umax_value += umax_val; 8474 } 8475 } 8476 8477 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 8478 struct bpf_reg_state *src_reg) 8479 { 8480 s32 smin_val = src_reg->s32_min_value; 8481 s32 smax_val = src_reg->s32_max_value; 8482 u32 umin_val = src_reg->u32_min_value; 8483 u32 umax_val = src_reg->u32_max_value; 8484 8485 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 8486 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 8487 /* Overflow possible, we know nothing */ 8488 dst_reg->s32_min_value = S32_MIN; 8489 dst_reg->s32_max_value = S32_MAX; 8490 } else { 8491 dst_reg->s32_min_value -= smax_val; 8492 dst_reg->s32_max_value -= smin_val; 8493 } 8494 if (dst_reg->u32_min_value < umax_val) { 8495 /* Overflow possible, we know nothing */ 8496 dst_reg->u32_min_value = 0; 8497 dst_reg->u32_max_value = U32_MAX; 8498 } else { 8499 /* Cannot overflow (as long as bounds are consistent) */ 8500 dst_reg->u32_min_value -= umax_val; 8501 dst_reg->u32_max_value -= umin_val; 8502 } 8503 } 8504 8505 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 8506 struct bpf_reg_state *src_reg) 8507 { 8508 s64 smin_val = src_reg->smin_value; 8509 s64 smax_val = src_reg->smax_value; 8510 u64 umin_val = src_reg->umin_value; 8511 u64 umax_val = src_reg->umax_value; 8512 8513 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 8514 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 8515 /* Overflow possible, we know nothing */ 8516 dst_reg->smin_value = S64_MIN; 8517 dst_reg->smax_value = S64_MAX; 8518 } else { 8519 dst_reg->smin_value -= smax_val; 8520 dst_reg->smax_value -= smin_val; 8521 } 8522 if (dst_reg->umin_value < umax_val) { 8523 /* Overflow possible, we know nothing */ 8524 dst_reg->umin_value = 0; 8525 dst_reg->umax_value = U64_MAX; 8526 } else { 8527 /* Cannot overflow (as long as bounds are consistent) */ 8528 dst_reg->umin_value -= umax_val; 8529 dst_reg->umax_value -= umin_val; 8530 } 8531 } 8532 8533 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 8534 struct bpf_reg_state *src_reg) 8535 { 8536 s32 smin_val = src_reg->s32_min_value; 8537 u32 umin_val = src_reg->u32_min_value; 8538 u32 umax_val = src_reg->u32_max_value; 8539 8540 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 8541 /* Ain't nobody got time to multiply that sign */ 8542 __mark_reg32_unbounded(dst_reg); 8543 return; 8544 } 8545 /* Both values are positive, so we can work with unsigned and 8546 * copy the result to signed (unless it exceeds S32_MAX). 
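 * The U16_MAX test below is a deliberately cheap sufficient condition:
 * with both operands at most 65535 the unsigned product is at most
 * 65535 * 65535, which still fits in u32, so multiplying the bounds
 * cannot wrap.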
8547 */ 8548 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 8549 /* Potential overflow, we know nothing */ 8550 __mark_reg32_unbounded(dst_reg); 8551 return; 8552 } 8553 dst_reg->u32_min_value *= umin_val; 8554 dst_reg->u32_max_value *= umax_val; 8555 if (dst_reg->u32_max_value > S32_MAX) { 8556 /* Overflow possible, we know nothing */ 8557 dst_reg->s32_min_value = S32_MIN; 8558 dst_reg->s32_max_value = S32_MAX; 8559 } else { 8560 dst_reg->s32_min_value = dst_reg->u32_min_value; 8561 dst_reg->s32_max_value = dst_reg->u32_max_value; 8562 } 8563 } 8564 8565 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 8566 struct bpf_reg_state *src_reg) 8567 { 8568 s64 smin_val = src_reg->smin_value; 8569 u64 umin_val = src_reg->umin_value; 8570 u64 umax_val = src_reg->umax_value; 8571 8572 if (smin_val < 0 || dst_reg->smin_value < 0) { 8573 /* Ain't nobody got time to multiply that sign */ 8574 __mark_reg64_unbounded(dst_reg); 8575 return; 8576 } 8577 /* Both values are positive, so we can work with unsigned and 8578 * copy the result to signed (unless it exceeds S64_MAX). 8579 */ 8580 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 8581 /* Potential overflow, we know nothing */ 8582 __mark_reg64_unbounded(dst_reg); 8583 return; 8584 } 8585 dst_reg->umin_value *= umin_val; 8586 dst_reg->umax_value *= umax_val; 8587 if (dst_reg->umax_value > S64_MAX) { 8588 /* Overflow possible, we know nothing */ 8589 dst_reg->smin_value = S64_MIN; 8590 dst_reg->smax_value = S64_MAX; 8591 } else { 8592 dst_reg->smin_value = dst_reg->umin_value; 8593 dst_reg->smax_value = dst_reg->umax_value; 8594 } 8595 } 8596 8597 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 8598 struct bpf_reg_state *src_reg) 8599 { 8600 bool src_known = tnum_subreg_is_const(src_reg->var_off); 8601 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 8602 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 8603 s32 smin_val = src_reg->s32_min_value; 8604 u32 umax_val = src_reg->u32_max_value; 8605 8606 if (src_known && dst_known) { 8607 __mark_reg32_known(dst_reg, var32_off.value); 8608 return; 8609 } 8610 8611 /* We get our minimum from the var_off, since that's inherently 8612 * bitwise. Our maximum is the minimum of the operands' maxima. 8613 */ 8614 dst_reg->u32_min_value = var32_off.value; 8615 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 8616 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 8617 /* Lose signed bounds when ANDing negative numbers, 8618 * ain't nobody got time for that. 8619 */ 8620 dst_reg->s32_min_value = S32_MIN; 8621 dst_reg->s32_max_value = S32_MAX; 8622 } else { 8623 /* ANDing two positives gives a positive, so safe to 8624 * cast result into s64. 8625 */ 8626 dst_reg->s32_min_value = dst_reg->u32_min_value; 8627 dst_reg->s32_max_value = dst_reg->u32_max_value; 8628 } 8629 } 8630 8631 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 8632 struct bpf_reg_state *src_reg) 8633 { 8634 bool src_known = tnum_is_const(src_reg->var_off); 8635 bool dst_known = tnum_is_const(dst_reg->var_off); 8636 s64 smin_val = src_reg->smin_value; 8637 u64 umax_val = src_reg->umax_value; 8638 8639 if (src_known && dst_known) { 8640 __mark_reg_known(dst_reg, dst_reg->var_off.value); 8641 return; 8642 } 8643 8644 /* We get our minimum from the var_off, since that's inherently 8645 * bitwise. Our maximum is the minimum of the operands' maxima. 
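 * For instance, if one operand is known to be at most 0xff and the other
 * at most 0x0f, the AND result can never exceed 0x0f.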
8646 */ 8647 dst_reg->umin_value = dst_reg->var_off.value; 8648 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 8649 if (dst_reg->smin_value < 0 || smin_val < 0) { 8650 /* Lose signed bounds when ANDing negative numbers, 8651 * ain't nobody got time for that. 8652 */ 8653 dst_reg->smin_value = S64_MIN; 8654 dst_reg->smax_value = S64_MAX; 8655 } else { 8656 /* ANDing two positives gives a positive, so safe to 8657 * cast result into s64. 8658 */ 8659 dst_reg->smin_value = dst_reg->umin_value; 8660 dst_reg->smax_value = dst_reg->umax_value; 8661 } 8662 /* We may learn something more from the var_off */ 8663 __update_reg_bounds(dst_reg); 8664 } 8665 8666 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 8667 struct bpf_reg_state *src_reg) 8668 { 8669 bool src_known = tnum_subreg_is_const(src_reg->var_off); 8670 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 8671 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 8672 s32 smin_val = src_reg->s32_min_value; 8673 u32 umin_val = src_reg->u32_min_value; 8674 8675 if (src_known && dst_known) { 8676 __mark_reg32_known(dst_reg, var32_off.value); 8677 return; 8678 } 8679 8680 /* We get our maximum from the var_off, and our minimum is the 8681 * maximum of the operands' minima 8682 */ 8683 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 8684 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 8685 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 8686 /* Lose signed bounds when ORing negative numbers, 8687 * ain't nobody got time for that. 8688 */ 8689 dst_reg->s32_min_value = S32_MIN; 8690 dst_reg->s32_max_value = S32_MAX; 8691 } else { 8692 /* ORing two positives gives a positive, so safe to 8693 * cast result into s64. 8694 */ 8695 dst_reg->s32_min_value = dst_reg->u32_min_value; 8696 dst_reg->s32_max_value = dst_reg->u32_max_value; 8697 } 8698 } 8699 8700 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 8701 struct bpf_reg_state *src_reg) 8702 { 8703 bool src_known = tnum_is_const(src_reg->var_off); 8704 bool dst_known = tnum_is_const(dst_reg->var_off); 8705 s64 smin_val = src_reg->smin_value; 8706 u64 umin_val = src_reg->umin_value; 8707 8708 if (src_known && dst_known) { 8709 __mark_reg_known(dst_reg, dst_reg->var_off.value); 8710 return; 8711 } 8712 8713 /* We get our maximum from the var_off, and our minimum is the 8714 * maximum of the operands' minima 8715 */ 8716 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 8717 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 8718 if (dst_reg->smin_value < 0 || smin_val < 0) { 8719 /* Lose signed bounds when ORing negative numbers, 8720 * ain't nobody got time for that. 8721 */ 8722 dst_reg->smin_value = S64_MIN; 8723 dst_reg->smax_value = S64_MAX; 8724 } else { 8725 /* ORing two positives gives a positive, so safe to 8726 * cast result into s64. 
8727 */ 8728 dst_reg->smin_value = dst_reg->umin_value; 8729 dst_reg->smax_value = dst_reg->umax_value; 8730 } 8731 /* We may learn something more from the var_off */ 8732 __update_reg_bounds(dst_reg); 8733 } 8734 8735 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 8736 struct bpf_reg_state *src_reg) 8737 { 8738 bool src_known = tnum_subreg_is_const(src_reg->var_off); 8739 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 8740 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 8741 s32 smin_val = src_reg->s32_min_value; 8742 8743 if (src_known && dst_known) { 8744 __mark_reg32_known(dst_reg, var32_off.value); 8745 return; 8746 } 8747 8748 /* We get both minimum and maximum from the var32_off. */ 8749 dst_reg->u32_min_value = var32_off.value; 8750 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 8751 8752 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 8753 /* XORing two positive sign numbers gives a positive, 8754 * so safe to cast u32 result into s32. 8755 */ 8756 dst_reg->s32_min_value = dst_reg->u32_min_value; 8757 dst_reg->s32_max_value = dst_reg->u32_max_value; 8758 } else { 8759 dst_reg->s32_min_value = S32_MIN; 8760 dst_reg->s32_max_value = S32_MAX; 8761 } 8762 } 8763 8764 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 8765 struct bpf_reg_state *src_reg) 8766 { 8767 bool src_known = tnum_is_const(src_reg->var_off); 8768 bool dst_known = tnum_is_const(dst_reg->var_off); 8769 s64 smin_val = src_reg->smin_value; 8770 8771 if (src_known && dst_known) { 8772 /* dst_reg->var_off.value has been updated earlier */ 8773 __mark_reg_known(dst_reg, dst_reg->var_off.value); 8774 return; 8775 } 8776 8777 /* We get both minimum and maximum from the var_off. */ 8778 dst_reg->umin_value = dst_reg->var_off.value; 8779 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 8780 8781 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 8782 /* XORing two positive sign numbers gives a positive, 8783 * so safe to cast u64 result into s64. 8784 */ 8785 dst_reg->smin_value = dst_reg->umin_value; 8786 dst_reg->smax_value = dst_reg->umax_value; 8787 } else { 8788 dst_reg->smin_value = S64_MIN; 8789 dst_reg->smax_value = S64_MAX; 8790 } 8791 8792 __update_reg_bounds(dst_reg); 8793 } 8794 8795 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 8796 u64 umin_val, u64 umax_val) 8797 { 8798 /* We lose all sign bit information (except what we can pick 8799 * up from var_off) 8800 */ 8801 dst_reg->s32_min_value = S32_MIN; 8802 dst_reg->s32_max_value = S32_MAX; 8803 /* If we might shift our top bit out, then we know nothing */ 8804 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 8805 dst_reg->u32_min_value = 0; 8806 dst_reg->u32_max_value = U32_MAX; 8807 } else { 8808 dst_reg->u32_min_value <<= umin_val; 8809 dst_reg->u32_max_value <<= umax_val; 8810 } 8811 } 8812 8813 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 8814 struct bpf_reg_state *src_reg) 8815 { 8816 u32 umax_val = src_reg->u32_max_value; 8817 u32 umin_val = src_reg->u32_min_value; 8818 /* u32 alu operation will zext upper bits */ 8819 struct tnum subreg = tnum_subreg(dst_reg->var_off); 8820 8821 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 8822 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 8823 /* Not required but being careful mark reg64 bounds as unknown so 8824 * that we are forced to pick them up from tnum and zext later and 8825 * if some path skips this step we are still safe. 
8826 */ 8827 __mark_reg64_unbounded(dst_reg); 8828 __update_reg32_bounds(dst_reg); 8829 } 8830 8831 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 8832 u64 umin_val, u64 umax_val) 8833 { 8834 /* Special case <<32 because it is a common compiler pattern to sign 8835 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 8836 * positive we know this shift will also be positive so we can track 8837 * bounds correctly. Otherwise we lose all sign bit information except 8838 * what we can pick up from var_off. Perhaps we can generalize this 8839 * later to shifts of any length. 8840 */ 8841 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 8842 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 8843 else 8844 dst_reg->smax_value = S64_MAX; 8845 8846 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 8847 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 8848 else 8849 dst_reg->smin_value = S64_MIN; 8850 8851 /* If we might shift our top bit out, then we know nothing */ 8852 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 8853 dst_reg->umin_value = 0; 8854 dst_reg->umax_value = U64_MAX; 8855 } else { 8856 dst_reg->umin_value <<= umin_val; 8857 dst_reg->umax_value <<= umax_val; 8858 } 8859 } 8860 8861 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 8862 struct bpf_reg_state *src_reg) 8863 { 8864 u64 umax_val = src_reg->umax_value; 8865 u64 umin_val = src_reg->umin_value; 8866 8867 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 8868 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 8869 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 8870 8871 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 8872 /* We may learn something more from the var_off */ 8873 __update_reg_bounds(dst_reg); 8874 } 8875 8876 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 8877 struct bpf_reg_state *src_reg) 8878 { 8879 struct tnum subreg = tnum_subreg(dst_reg->var_off); 8880 u32 umax_val = src_reg->u32_max_value; 8881 u32 umin_val = src_reg->u32_min_value; 8882 8883 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 8884 * be negative, then either: 8885 * 1) src_reg might be zero, so the sign bit of the result is 8886 * unknown, so we lose our signed bounds 8887 * 2) it's known negative, thus the unsigned bounds capture the 8888 * signed bounds 8889 * 3) the signed bounds cross zero, so they tell us nothing 8890 * about the result 8891 * If the value in dst_reg is known nonnegative, then again the 8892 * unsigned bounds capture the signed bounds. 8893 * Thus, in all cases it suffices to blow away our signed bounds 8894 * and rely on inferring new ones from the unsigned bounds and 8895 * var_off of the result. 8896 */ 8897 dst_reg->s32_min_value = S32_MIN; 8898 dst_reg->s32_max_value = S32_MAX; 8899 8900 dst_reg->var_off = tnum_rshift(subreg, umin_val); 8901 dst_reg->u32_min_value >>= umax_val; 8902 dst_reg->u32_max_value >>= umin_val; 8903 8904 __mark_reg64_unbounded(dst_reg); 8905 __update_reg32_bounds(dst_reg); 8906 } 8907 8908 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 8909 struct bpf_reg_state *src_reg) 8910 { 8911 u64 umax_val = src_reg->umax_value; 8912 u64 umin_val = src_reg->umin_value; 8913 8914 /* BPF_RSH is an unsigned shift. 
If the value in dst_reg might 8915 * be negative, then either: 8916 * 1) src_reg might be zero, so the sign bit of the result is 8917 * unknown, so we lose our signed bounds 8918 * 2) it's known negative, thus the unsigned bounds capture the 8919 * signed bounds 8920 * 3) the signed bounds cross zero, so they tell us nothing 8921 * about the result 8922 * If the value in dst_reg is known nonnegative, then again the 8923 * unsigned bounds capture the signed bounds. 8924 * Thus, in all cases it suffices to blow away our signed bounds 8925 * and rely on inferring new ones from the unsigned bounds and 8926 * var_off of the result. 8927 */ 8928 dst_reg->smin_value = S64_MIN; 8929 dst_reg->smax_value = S64_MAX; 8930 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 8931 dst_reg->umin_value >>= umax_val; 8932 dst_reg->umax_value >>= umin_val; 8933 8934 /* Its not easy to operate on alu32 bounds here because it depends 8935 * on bits being shifted in. Take easy way out and mark unbounded 8936 * so we can recalculate later from tnum. 8937 */ 8938 __mark_reg32_unbounded(dst_reg); 8939 __update_reg_bounds(dst_reg); 8940 } 8941 8942 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 8943 struct bpf_reg_state *src_reg) 8944 { 8945 u64 umin_val = src_reg->u32_min_value; 8946 8947 /* Upon reaching here, src_known is true and 8948 * umax_val is equal to umin_val. 8949 */ 8950 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 8951 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 8952 8953 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 8954 8955 /* blow away the dst_reg umin_value/umax_value and rely on 8956 * dst_reg var_off to refine the result. 8957 */ 8958 dst_reg->u32_min_value = 0; 8959 dst_reg->u32_max_value = U32_MAX; 8960 8961 __mark_reg64_unbounded(dst_reg); 8962 __update_reg32_bounds(dst_reg); 8963 } 8964 8965 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 8966 struct bpf_reg_state *src_reg) 8967 { 8968 u64 umin_val = src_reg->umin_value; 8969 8970 /* Upon reaching here, src_known is true and umax_val is equal 8971 * to umin_val. 8972 */ 8973 dst_reg->smin_value >>= umin_val; 8974 dst_reg->smax_value >>= umin_val; 8975 8976 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 8977 8978 /* blow away the dst_reg umin_value/umax_value and rely on 8979 * dst_reg var_off to refine the result. 8980 */ 8981 dst_reg->umin_value = 0; 8982 dst_reg->umax_value = U64_MAX; 8983 8984 /* Its not easy to operate on alu32 bounds here because it depends 8985 * on bits being shifted in from upper 32-bits. Take easy way out 8986 * and mark unbounded so we can recalculate later from tnum. 8987 */ 8988 __mark_reg32_unbounded(dst_reg); 8989 __update_reg_bounds(dst_reg); 8990 } 8991 8992 /* WARNING: This function does calculations on 64-bit values, but the actual 8993 * execution may occur on 32-bit values. Therefore, things like bitshifts 8994 * need extra checks in the 32-bit case. 8995 */ 8996 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 8997 struct bpf_insn *insn, 8998 struct bpf_reg_state *dst_reg, 8999 struct bpf_reg_state src_reg) 9000 { 9001 struct bpf_reg_state *regs = cur_regs(env); 9002 u8 opcode = BPF_OP(insn->code); 9003 bool src_known; 9004 s64 smin_val, smax_val; 9005 u64 umin_val, umax_val; 9006 s32 s32_min_val, s32_max_val; 9007 u32 u32_min_val, u32_max_val; 9008 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 
64 : 32; 9009 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 9010 int ret; 9011 9012 smin_val = src_reg.smin_value; 9013 smax_val = src_reg.smax_value; 9014 umin_val = src_reg.umin_value; 9015 umax_val = src_reg.umax_value; 9016 9017 s32_min_val = src_reg.s32_min_value; 9018 s32_max_val = src_reg.s32_max_value; 9019 u32_min_val = src_reg.u32_min_value; 9020 u32_max_val = src_reg.u32_max_value; 9021 9022 if (alu32) { 9023 src_known = tnum_subreg_is_const(src_reg.var_off); 9024 if ((src_known && 9025 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 9026 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 9027 /* Taint dst register if offset had invalid bounds 9028 * derived from e.g. dead branches. 9029 */ 9030 __mark_reg_unknown(env, dst_reg); 9031 return 0; 9032 } 9033 } else { 9034 src_known = tnum_is_const(src_reg.var_off); 9035 if ((src_known && 9036 (smin_val != smax_val || umin_val != umax_val)) || 9037 smin_val > smax_val || umin_val > umax_val) { 9038 /* Taint dst register if offset had invalid bounds 9039 * derived from e.g. dead branches. 9040 */ 9041 __mark_reg_unknown(env, dst_reg); 9042 return 0; 9043 } 9044 } 9045 9046 if (!src_known && 9047 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 9048 __mark_reg_unknown(env, dst_reg); 9049 return 0; 9050 } 9051 9052 if (sanitize_needed(opcode)) { 9053 ret = sanitize_val_alu(env, insn); 9054 if (ret < 0) 9055 return sanitize_err(env, insn, ret, NULL, NULL); 9056 } 9057 9058 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 9059 * There are two classes of instructions: The first class we track both 9060 * alu32 and alu64 sign/unsigned bounds independently this provides the 9061 * greatest amount of precision when alu operations are mixed with jmp32 9062 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_ADD, 9063 * and BPF_OR. This is possible because these ops have fairly easy to 9064 * understand and calculate behavior in both 32-bit and 64-bit alu ops. 9065 * See alu32 verifier tests for examples. The second class of 9066 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy 9067 * with regards to tracking sign/unsigned bounds because the bits may 9068 * cross subreg boundaries in the alu64 case. When this happens we mark 9069 * the reg unbounded in the subreg bound space and use the resulting 9070 * tnum to calculate an approximation of the sign/unsigned bounds. 
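/* Illustrative aside (not verifier code): a minimal model of the tristate
 * numbers behind var_off, in the spirit of kernel/bpf/tnum.c.  Bits set in
 * mask are unknown (their value bit is then 0); all other bits are known and
 * given by value.  Shifts stay exact because known and unknown bits move
 * together; addition has to widen the unknown set wherever a carry out of an
 * unknown bit could land.  The names below are made up for this sketch.
 */
struct tnum_sketch {
	u64 value;	/* the known bits            */
	u64 mask;	/* 1 = this bit is unknown   */
};

static struct tnum_sketch tnum_sketch_lshift(struct tnum_sketch a, u8 shift)
{
	return (struct tnum_sketch){ a.value << shift, a.mask << shift };
}

static struct tnum_sketch tnum_sketch_add(struct tnum_sketch a, struct tnum_sketch b)
{
	u64 sm = a.mask + b.mask;
	u64 sv = a.value + b.value;
	u64 sigma = sm + sv;
	u64 chi = sigma ^ sv;			/* bit positions a carry can disturb     */
	u64 mu = chi | a.mask | b.mask;		/* ...plus the originally unknown bits   */

	return (struct tnum_sketch){ sv & ~mu, mu };
}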
9071 */ 9072 switch (opcode) { 9073 case BPF_ADD: 9074 scalar32_min_max_add(dst_reg, &src_reg); 9075 scalar_min_max_add(dst_reg, &src_reg); 9076 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 9077 break; 9078 case BPF_SUB: 9079 scalar32_min_max_sub(dst_reg, &src_reg); 9080 scalar_min_max_sub(dst_reg, &src_reg); 9081 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 9082 break; 9083 case BPF_MUL: 9084 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 9085 scalar32_min_max_mul(dst_reg, &src_reg); 9086 scalar_min_max_mul(dst_reg, &src_reg); 9087 break; 9088 case BPF_AND: 9089 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 9090 scalar32_min_max_and(dst_reg, &src_reg); 9091 scalar_min_max_and(dst_reg, &src_reg); 9092 break; 9093 case BPF_OR: 9094 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 9095 scalar32_min_max_or(dst_reg, &src_reg); 9096 scalar_min_max_or(dst_reg, &src_reg); 9097 break; 9098 case BPF_XOR: 9099 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 9100 scalar32_min_max_xor(dst_reg, &src_reg); 9101 scalar_min_max_xor(dst_reg, &src_reg); 9102 break; 9103 case BPF_LSH: 9104 if (umax_val >= insn_bitness) { 9105 /* Shifts greater than 31 or 63 are undefined. 9106 * This includes shifts by a negative number. 9107 */ 9108 mark_reg_unknown(env, regs, insn->dst_reg); 9109 break; 9110 } 9111 if (alu32) 9112 scalar32_min_max_lsh(dst_reg, &src_reg); 9113 else 9114 scalar_min_max_lsh(dst_reg, &src_reg); 9115 break; 9116 case BPF_RSH: 9117 if (umax_val >= insn_bitness) { 9118 /* Shifts greater than 31 or 63 are undefined. 9119 * This includes shifts by a negative number. 9120 */ 9121 mark_reg_unknown(env, regs, insn->dst_reg); 9122 break; 9123 } 9124 if (alu32) 9125 scalar32_min_max_rsh(dst_reg, &src_reg); 9126 else 9127 scalar_min_max_rsh(dst_reg, &src_reg); 9128 break; 9129 case BPF_ARSH: 9130 if (umax_val >= insn_bitness) { 9131 /* Shifts greater than 31 or 63 are undefined. 9132 * This includes shifts by a negative number. 9133 */ 9134 mark_reg_unknown(env, regs, insn->dst_reg); 9135 break; 9136 } 9137 if (alu32) 9138 scalar32_min_max_arsh(dst_reg, &src_reg); 9139 else 9140 scalar_min_max_arsh(dst_reg, &src_reg); 9141 break; 9142 default: 9143 mark_reg_unknown(env, regs, insn->dst_reg); 9144 break; 9145 } 9146 9147 /* ALU32 ops are zero extended into 64bit register */ 9148 if (alu32) 9149 zext_32_to_64(dst_reg); 9150 reg_bounds_sync(dst_reg); 9151 return 0; 9152 } 9153 9154 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 9155 * and var_off. 
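/* Illustrative aside: the operand shapes that adjust_reg_min_max_vals() below
 * has to tell apart, written with the insn macros from linux/filter.h.  The
 * register numbers and immediates are arbitrary placeholders; r10 is the
 * read-only frame pointer, so r7 becomes a stack pointer after the copy.
 */
static const struct bpf_insn alu_operand_shapes[] = {
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),		/* r7: pointer (stack)                              */
	BPF_MOV64_IMM(BPF_REG_8, 8),			/* r8: known scalar                                 */
	BPF_MOV64_IMM(BPF_REG_9, 4),			/* r9: known scalar                                 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_9),	/* scalar += scalar: adjust_scalar_min_max_vals()   */
	BPF_ALU64_REG(BPF_SUB, BPF_REG_7, BPF_REG_8),	/* pointer -= scalar: adjust_ptr_min_max_vals()     */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_9, BPF_REG_7),	/* scalar += pointer: src/dst roles are swapped     */
};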
9156 */
9157 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
9158 				    struct bpf_insn *insn)
9159 {
9160 	struct bpf_verifier_state *vstate = env->cur_state;
9161 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9162 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
9163 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
9164 	u8 opcode = BPF_OP(insn->code);
9165 	int err;
9166 
9167 	dst_reg = &regs[insn->dst_reg];
9168 	src_reg = NULL;
9169 	if (dst_reg->type != SCALAR_VALUE)
9170 		ptr_reg = dst_reg;
9171 	else
9172 		/* Make sure ID is cleared otherwise dst_reg min/max could be
9173 		 * incorrectly propagated into other registers by find_equal_scalars()
9174 		 */
9175 		dst_reg->id = 0;
9176 	if (BPF_SRC(insn->code) == BPF_X) {
9177 		src_reg = &regs[insn->src_reg];
9178 		if (src_reg->type != SCALAR_VALUE) {
9179 			if (dst_reg->type != SCALAR_VALUE) {
9180 				/* Combining two pointers by any ALU op yields
9181 				 * an arbitrary scalar. Disallow all math except
9182 				 * pointer subtraction
9183 				 */
9184 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
9185 					mark_reg_unknown(env, regs, insn->dst_reg);
9186 					return 0;
9187 				}
9188 				verbose(env, "R%d pointer %s pointer prohibited\n",
9189 					insn->dst_reg,
9190 					bpf_alu_string[opcode >> 4]);
9191 				return -EACCES;
9192 			} else {
9193 				/* scalar += pointer
9194 				 * This is legal, but we have to reverse our
9195 				 * src/dest handling in computing the range
9196 				 */
9197 				err = mark_chain_precision(env, insn->dst_reg);
9198 				if (err)
9199 					return err;
9200 				return adjust_ptr_min_max_vals(env, insn,
9201 							       src_reg, dst_reg);
9202 			}
9203 		} else if (ptr_reg) {
9204 			/* pointer += scalar */
9205 			err = mark_chain_precision(env, insn->src_reg);
9206 			if (err)
9207 				return err;
9208 			return adjust_ptr_min_max_vals(env, insn,
9209 						       dst_reg, src_reg);
9210 		}
9211 	} else {
9212 		/* Pretend the src is a reg with a known value, since we only
9213 		 * need to be able to read from this state.
9214 */ 9215 off_reg.type = SCALAR_VALUE; 9216 __mark_reg_known(&off_reg, insn->imm); 9217 src_reg = &off_reg; 9218 if (ptr_reg) /* pointer += K */ 9219 return adjust_ptr_min_max_vals(env, insn, 9220 ptr_reg, src_reg); 9221 } 9222 9223 /* Got here implies adding two SCALAR_VALUEs */ 9224 if (WARN_ON_ONCE(ptr_reg)) { 9225 print_verifier_state(env, state, true); 9226 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 9227 return -EINVAL; 9228 } 9229 if (WARN_ON(!src_reg)) { 9230 print_verifier_state(env, state, true); 9231 verbose(env, "verifier internal error: no src_reg\n"); 9232 return -EINVAL; 9233 } 9234 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 9235 } 9236 9237 /* check validity of 32-bit and 64-bit arithmetic operations */ 9238 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 9239 { 9240 struct bpf_reg_state *regs = cur_regs(env); 9241 u8 opcode = BPF_OP(insn->code); 9242 int err; 9243 9244 if (opcode == BPF_END || opcode == BPF_NEG) { 9245 if (opcode == BPF_NEG) { 9246 if (BPF_SRC(insn->code) != BPF_K || 9247 insn->src_reg != BPF_REG_0 || 9248 insn->off != 0 || insn->imm != 0) { 9249 verbose(env, "BPF_NEG uses reserved fields\n"); 9250 return -EINVAL; 9251 } 9252 } else { 9253 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 9254 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 9255 BPF_CLASS(insn->code) == BPF_ALU64) { 9256 verbose(env, "BPF_END uses reserved fields\n"); 9257 return -EINVAL; 9258 } 9259 } 9260 9261 /* check src operand */ 9262 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 9263 if (err) 9264 return err; 9265 9266 if (is_pointer_value(env, insn->dst_reg)) { 9267 verbose(env, "R%d pointer arithmetic prohibited\n", 9268 insn->dst_reg); 9269 return -EACCES; 9270 } 9271 9272 /* check dest operand */ 9273 err = check_reg_arg(env, insn->dst_reg, DST_OP); 9274 if (err) 9275 return err; 9276 9277 } else if (opcode == BPF_MOV) { 9278 9279 if (BPF_SRC(insn->code) == BPF_X) { 9280 if (insn->imm != 0 || insn->off != 0) { 9281 verbose(env, "BPF_MOV uses reserved fields\n"); 9282 return -EINVAL; 9283 } 9284 9285 /* check src operand */ 9286 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9287 if (err) 9288 return err; 9289 } else { 9290 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 9291 verbose(env, "BPF_MOV uses reserved fields\n"); 9292 return -EINVAL; 9293 } 9294 } 9295 9296 /* check dest operand, mark as required later */ 9297 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 9298 if (err) 9299 return err; 9300 9301 if (BPF_SRC(insn->code) == BPF_X) { 9302 struct bpf_reg_state *src_reg = regs + insn->src_reg; 9303 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 9304 9305 if (BPF_CLASS(insn->code) == BPF_ALU64) { 9306 /* case: R1 = R2 9307 * copy register state to dest reg 9308 */ 9309 if (src_reg->type == SCALAR_VALUE && !src_reg->id) 9310 /* Assign src and dst registers the same ID 9311 * that will be used by find_equal_scalars() 9312 * to propagate min/max range. 
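/* Illustrative aside: the effect of the shared id assigned just below.  The
 * register numbers and the jump offset are placeholders, and r2 is assumed to
 * already hold a scalar (say, a length computed earlier).  Because the 64-bit
 * copy gives r6 the same id as r2, a branch that narrows r2 also narrows r6
 * via find_equal_scalars().
 */
static const struct bpf_insn shared_scalar_id_example[] = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),		/* r6 = r2: both now carry the same id       */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 100, 2),	/* taken branch: r2 (and thus r6) > 100      */
	/* fall-through: r2 <= 100, and through the shared id r6 <= 100 too */
};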
9313 */ 9314 src_reg->id = ++env->id_gen; 9315 *dst_reg = *src_reg; 9316 dst_reg->live |= REG_LIVE_WRITTEN; 9317 dst_reg->subreg_def = DEF_NOT_SUBREG; 9318 } else { 9319 /* R1 = (u32) R2 */ 9320 if (is_pointer_value(env, insn->src_reg)) { 9321 verbose(env, 9322 "R%d partial copy of pointer\n", 9323 insn->src_reg); 9324 return -EACCES; 9325 } else if (src_reg->type == SCALAR_VALUE) { 9326 *dst_reg = *src_reg; 9327 /* Make sure ID is cleared otherwise 9328 * dst_reg min/max could be incorrectly 9329 * propagated into src_reg by find_equal_scalars() 9330 */ 9331 dst_reg->id = 0; 9332 dst_reg->live |= REG_LIVE_WRITTEN; 9333 dst_reg->subreg_def = env->insn_idx + 1; 9334 } else { 9335 mark_reg_unknown(env, regs, 9336 insn->dst_reg); 9337 } 9338 zext_32_to_64(dst_reg); 9339 reg_bounds_sync(dst_reg); 9340 } 9341 } else { 9342 /* case: R = imm 9343 * remember the value we stored into this reg 9344 */ 9345 /* clear any state __mark_reg_known doesn't set */ 9346 mark_reg_unknown(env, regs, insn->dst_reg); 9347 regs[insn->dst_reg].type = SCALAR_VALUE; 9348 if (BPF_CLASS(insn->code) == BPF_ALU64) { 9349 __mark_reg_known(regs + insn->dst_reg, 9350 insn->imm); 9351 } else { 9352 __mark_reg_known(regs + insn->dst_reg, 9353 (u32)insn->imm); 9354 } 9355 } 9356 9357 } else if (opcode > BPF_END) { 9358 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 9359 return -EINVAL; 9360 9361 } else { /* all other ALU ops: and, sub, xor, add, ... */ 9362 9363 if (BPF_SRC(insn->code) == BPF_X) { 9364 if (insn->imm != 0 || insn->off != 0) { 9365 verbose(env, "BPF_ALU uses reserved fields\n"); 9366 return -EINVAL; 9367 } 9368 /* check src1 operand */ 9369 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9370 if (err) 9371 return err; 9372 } else { 9373 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 9374 verbose(env, "BPF_ALU uses reserved fields\n"); 9375 return -EINVAL; 9376 } 9377 } 9378 9379 /* check src2 operand */ 9380 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 9381 if (err) 9382 return err; 9383 9384 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 9385 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 9386 verbose(env, "div by zero\n"); 9387 return -EINVAL; 9388 } 9389 9390 if ((opcode == BPF_LSH || opcode == BPF_RSH || 9391 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 9392 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 9393 9394 if (insn->imm < 0 || insn->imm >= size) { 9395 verbose(env, "invalid shift %d\n", insn->imm); 9396 return -EINVAL; 9397 } 9398 } 9399 9400 /* check dest operand */ 9401 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 9402 if (err) 9403 return err; 9404 9405 return adjust_reg_min_max_vals(env, insn); 9406 } 9407 9408 return 0; 9409 } 9410 9411 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 9412 struct bpf_reg_state *dst_reg, 9413 enum bpf_reg_type type, 9414 bool range_right_open) 9415 { 9416 struct bpf_func_state *state; 9417 struct bpf_reg_state *reg; 9418 int new_range; 9419 9420 if (dst_reg->off < 0 || 9421 (dst_reg->off == 0 && range_right_open)) 9422 /* This doesn't give us any range */ 9423 return; 9424 9425 if (dst_reg->umax_value > MAX_PACKET_OFF || 9426 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 9427 /* Risk of overflow. For instance, ptr + (1<<63) may be less 9428 * than pkt_end, but that's because it's also less than pkt. 
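/* Illustrative aside: the C-level pattern (here in XDP style) that produces
 * the pkt vs pkt_end comparisons this function consumes.  The function name,
 * the 14-byte length and the return values are placeholders; only xdp_md and
 * its data/data_end fields are real uapi.  On the fall-through path, where the
 * length check passed, find_good_pkt_pointers() records a range of 14 for
 * every register sharing the packet id, so the byte access below is provably
 * in bounds.
 */
static int pkt_range_check_sketch(struct xdp_md *ctx)
{
	unsigned char *data     = (unsigned char *)(long)ctx->data;
	unsigned char *data_end = (unsigned char *)(long)ctx->data_end;

	if (data + 14 > data_end)	/* r2 = pkt; r2 += 14; if (r2 > pkt_end) goto drop */
		return 0;		/* would be an XDP verdict in a real program       */
	return data[13];		/* offsets 0..13 are now inside the packet          */
}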
9429 */ 9430 return; 9431 9432 new_range = dst_reg->off; 9433 if (range_right_open) 9434 new_range++; 9435 9436 /* Examples for register markings: 9437 * 9438 * pkt_data in dst register: 9439 * 9440 * r2 = r3; 9441 * r2 += 8; 9442 * if (r2 > pkt_end) goto <handle exception> 9443 * <access okay> 9444 * 9445 * r2 = r3; 9446 * r2 += 8; 9447 * if (r2 < pkt_end) goto <access okay> 9448 * <handle exception> 9449 * 9450 * Where: 9451 * r2 == dst_reg, pkt_end == src_reg 9452 * r2=pkt(id=n,off=8,r=0) 9453 * r3=pkt(id=n,off=0,r=0) 9454 * 9455 * pkt_data in src register: 9456 * 9457 * r2 = r3; 9458 * r2 += 8; 9459 * if (pkt_end >= r2) goto <access okay> 9460 * <handle exception> 9461 * 9462 * r2 = r3; 9463 * r2 += 8; 9464 * if (pkt_end <= r2) goto <handle exception> 9465 * <access okay> 9466 * 9467 * Where: 9468 * pkt_end == dst_reg, r2 == src_reg 9469 * r2=pkt(id=n,off=8,r=0) 9470 * r3=pkt(id=n,off=0,r=0) 9471 * 9472 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 9473 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 9474 * and [r3, r3 + 8-1) respectively is safe to access depending on 9475 * the check. 9476 */ 9477 9478 /* If our ids match, then we must have the same max_value. And we 9479 * don't care about the other reg's fixed offset, since if it's too big 9480 * the range won't allow anything. 9481 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 9482 */ 9483 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 9484 if (reg->type == type && reg->id == dst_reg->id) 9485 /* keep the maximum range already checked */ 9486 reg->range = max(reg->range, new_range); 9487 })); 9488 } 9489 9490 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 9491 { 9492 struct tnum subreg = tnum_subreg(reg->var_off); 9493 s32 sval = (s32)val; 9494 9495 switch (opcode) { 9496 case BPF_JEQ: 9497 if (tnum_is_const(subreg)) 9498 return !!tnum_equals_const(subreg, val); 9499 break; 9500 case BPF_JNE: 9501 if (tnum_is_const(subreg)) 9502 return !tnum_equals_const(subreg, val); 9503 break; 9504 case BPF_JSET: 9505 if ((~subreg.mask & subreg.value) & val) 9506 return 1; 9507 if (!((subreg.mask | subreg.value) & val)) 9508 return 0; 9509 break; 9510 case BPF_JGT: 9511 if (reg->u32_min_value > val) 9512 return 1; 9513 else if (reg->u32_max_value <= val) 9514 return 0; 9515 break; 9516 case BPF_JSGT: 9517 if (reg->s32_min_value > sval) 9518 return 1; 9519 else if (reg->s32_max_value <= sval) 9520 return 0; 9521 break; 9522 case BPF_JLT: 9523 if (reg->u32_max_value < val) 9524 return 1; 9525 else if (reg->u32_min_value >= val) 9526 return 0; 9527 break; 9528 case BPF_JSLT: 9529 if (reg->s32_max_value < sval) 9530 return 1; 9531 else if (reg->s32_min_value >= sval) 9532 return 0; 9533 break; 9534 case BPF_JGE: 9535 if (reg->u32_min_value >= val) 9536 return 1; 9537 else if (reg->u32_max_value < val) 9538 return 0; 9539 break; 9540 case BPF_JSGE: 9541 if (reg->s32_min_value >= sval) 9542 return 1; 9543 else if (reg->s32_max_value < sval) 9544 return 0; 9545 break; 9546 case BPF_JLE: 9547 if (reg->u32_max_value <= val) 9548 return 1; 9549 else if (reg->u32_min_value > val) 9550 return 0; 9551 break; 9552 case BPF_JSLE: 9553 if (reg->s32_max_value <= sval) 9554 return 1; 9555 else if (reg->s32_min_value > sval) 9556 return 0; 9557 break; 9558 } 9559 9560 return -1; 9561 } 9562 9563 9564 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 9565 { 9566 s64 sval = (s64)val; 9567 9568 switch (opcode) { 9569 case BPF_JEQ: 9570 if 
(tnum_is_const(reg->var_off)) 9571 return !!tnum_equals_const(reg->var_off, val); 9572 break; 9573 case BPF_JNE: 9574 if (tnum_is_const(reg->var_off)) 9575 return !tnum_equals_const(reg->var_off, val); 9576 break; 9577 case BPF_JSET: 9578 if ((~reg->var_off.mask & reg->var_off.value) & val) 9579 return 1; 9580 if (!((reg->var_off.mask | reg->var_off.value) & val)) 9581 return 0; 9582 break; 9583 case BPF_JGT: 9584 if (reg->umin_value > val) 9585 return 1; 9586 else if (reg->umax_value <= val) 9587 return 0; 9588 break; 9589 case BPF_JSGT: 9590 if (reg->smin_value > sval) 9591 return 1; 9592 else if (reg->smax_value <= sval) 9593 return 0; 9594 break; 9595 case BPF_JLT: 9596 if (reg->umax_value < val) 9597 return 1; 9598 else if (reg->umin_value >= val) 9599 return 0; 9600 break; 9601 case BPF_JSLT: 9602 if (reg->smax_value < sval) 9603 return 1; 9604 else if (reg->smin_value >= sval) 9605 return 0; 9606 break; 9607 case BPF_JGE: 9608 if (reg->umin_value >= val) 9609 return 1; 9610 else if (reg->umax_value < val) 9611 return 0; 9612 break; 9613 case BPF_JSGE: 9614 if (reg->smin_value >= sval) 9615 return 1; 9616 else if (reg->smax_value < sval) 9617 return 0; 9618 break; 9619 case BPF_JLE: 9620 if (reg->umax_value <= val) 9621 return 1; 9622 else if (reg->umin_value > val) 9623 return 0; 9624 break; 9625 case BPF_JSLE: 9626 if (reg->smax_value <= sval) 9627 return 1; 9628 else if (reg->smin_value > sval) 9629 return 0; 9630 break; 9631 } 9632 9633 return -1; 9634 } 9635 9636 /* compute branch direction of the expression "if (reg opcode val) goto target;" 9637 * and return: 9638 * 1 - branch will be taken and "goto target" will be executed 9639 * 0 - branch will not be taken and fall-through to next insn 9640 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value 9641 * range [0,10] 9642 */ 9643 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 9644 bool is_jmp32) 9645 { 9646 if (__is_pointer_value(false, reg)) { 9647 if (!reg_type_not_null(reg->type)) 9648 return -1; 9649 9650 /* If pointer is valid tests against zero will fail so we can 9651 * use this to direct branch taken. 9652 */ 9653 if (val != 0) 9654 return -1; 9655 9656 switch (opcode) { 9657 case BPF_JEQ: 9658 return 0; 9659 case BPF_JNE: 9660 return 1; 9661 default: 9662 return -1; 9663 } 9664 } 9665 9666 if (is_jmp32) 9667 return is_branch32_taken(reg, val, opcode); 9668 return is_branch64_taken(reg, val, opcode); 9669 } 9670 9671 static int flip_opcode(u32 opcode) 9672 { 9673 /* How can we transform "a <op> b" into "b <op> a"? 
*/ 9674 static const u8 opcode_flip[16] = { 9675 /* these stay the same */ 9676 [BPF_JEQ >> 4] = BPF_JEQ, 9677 [BPF_JNE >> 4] = BPF_JNE, 9678 [BPF_JSET >> 4] = BPF_JSET, 9679 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 9680 [BPF_JGE >> 4] = BPF_JLE, 9681 [BPF_JGT >> 4] = BPF_JLT, 9682 [BPF_JLE >> 4] = BPF_JGE, 9683 [BPF_JLT >> 4] = BPF_JGT, 9684 [BPF_JSGE >> 4] = BPF_JSLE, 9685 [BPF_JSGT >> 4] = BPF_JSLT, 9686 [BPF_JSLE >> 4] = BPF_JSGE, 9687 [BPF_JSLT >> 4] = BPF_JSGT 9688 }; 9689 return opcode_flip[opcode >> 4]; 9690 } 9691 9692 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, 9693 struct bpf_reg_state *src_reg, 9694 u8 opcode) 9695 { 9696 struct bpf_reg_state *pkt; 9697 9698 if (src_reg->type == PTR_TO_PACKET_END) { 9699 pkt = dst_reg; 9700 } else if (dst_reg->type == PTR_TO_PACKET_END) { 9701 pkt = src_reg; 9702 opcode = flip_opcode(opcode); 9703 } else { 9704 return -1; 9705 } 9706 9707 if (pkt->range >= 0) 9708 return -1; 9709 9710 switch (opcode) { 9711 case BPF_JLE: 9712 /* pkt <= pkt_end */ 9713 fallthrough; 9714 case BPF_JGT: 9715 /* pkt > pkt_end */ 9716 if (pkt->range == BEYOND_PKT_END) 9717 /* pkt has at last one extra byte beyond pkt_end */ 9718 return opcode == BPF_JGT; 9719 break; 9720 case BPF_JLT: 9721 /* pkt < pkt_end */ 9722 fallthrough; 9723 case BPF_JGE: 9724 /* pkt >= pkt_end */ 9725 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) 9726 return opcode == BPF_JGE; 9727 break; 9728 } 9729 return -1; 9730 } 9731 9732 /* Adjusts the register min/max values in the case that the dst_reg is the 9733 * variable register that we are working on, and src_reg is a constant or we're 9734 * simply doing a BPF_K check. 9735 * In JEQ/JNE cases we also adjust the var_off values. 9736 */ 9737 static void reg_set_min_max(struct bpf_reg_state *true_reg, 9738 struct bpf_reg_state *false_reg, 9739 u64 val, u32 val32, 9740 u8 opcode, bool is_jmp32) 9741 { 9742 struct tnum false_32off = tnum_subreg(false_reg->var_off); 9743 struct tnum false_64off = false_reg->var_off; 9744 struct tnum true_32off = tnum_subreg(true_reg->var_off); 9745 struct tnum true_64off = true_reg->var_off; 9746 s64 sval = (s64)val; 9747 s32 sval32 = (s32)val32; 9748 9749 /* If the dst_reg is a pointer, we can't learn anything about its 9750 * variable offset from the compare (unless src_reg were a pointer into 9751 * the same object, but we don't bother with that. 9752 * Since false_reg and true_reg have the same type by construction, we 9753 * only need to check one of them for pointerness. 9754 */ 9755 if (__is_pointer_value(false, false_reg)) 9756 return; 9757 9758 switch (opcode) { 9759 /* JEQ/JNE comparison doesn't change the register equivalence. 9760 * 9761 * r1 = r2; 9762 * if (r1 == 42) goto label; 9763 * ... 9764 * label: // here both r1 and r2 are known to be 42. 9765 * 9766 * Hence when marking register as known preserve it's ID. 
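/* Illustrative aside: the unsigned greater-than arm further below, modelled on
 * plain integers (sketch only, names made up).  For "if (reg > val)" the false
 * branch can only lower the unsigned maximum and the true branch can only
 * raise the unsigned minimum; e.g. reg in [0, 255] tested against val == 100
 * splits into [0, 100] and [101, 255].
 */
static void model_jgt_branch_bounds(u64 val, u64 *false_umax, u64 *true_umin)
{
	if (*false_umax > val)		/* false branch: reg <= val     */
		*false_umax = val;
	if (*true_umin < val + 1)	/* true branch:  reg >= val + 1 */
		*true_umin = val + 1;
}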
9767 */ 9768 case BPF_JEQ: 9769 if (is_jmp32) { 9770 __mark_reg32_known(true_reg, val32); 9771 true_32off = tnum_subreg(true_reg->var_off); 9772 } else { 9773 ___mark_reg_known(true_reg, val); 9774 true_64off = true_reg->var_off; 9775 } 9776 break; 9777 case BPF_JNE: 9778 if (is_jmp32) { 9779 __mark_reg32_known(false_reg, val32); 9780 false_32off = tnum_subreg(false_reg->var_off); 9781 } else { 9782 ___mark_reg_known(false_reg, val); 9783 false_64off = false_reg->var_off; 9784 } 9785 break; 9786 case BPF_JSET: 9787 if (is_jmp32) { 9788 false_32off = tnum_and(false_32off, tnum_const(~val32)); 9789 if (is_power_of_2(val32)) 9790 true_32off = tnum_or(true_32off, 9791 tnum_const(val32)); 9792 } else { 9793 false_64off = tnum_and(false_64off, tnum_const(~val)); 9794 if (is_power_of_2(val)) 9795 true_64off = tnum_or(true_64off, 9796 tnum_const(val)); 9797 } 9798 break; 9799 case BPF_JGE: 9800 case BPF_JGT: 9801 { 9802 if (is_jmp32) { 9803 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 9804 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 9805 9806 false_reg->u32_max_value = min(false_reg->u32_max_value, 9807 false_umax); 9808 true_reg->u32_min_value = max(true_reg->u32_min_value, 9809 true_umin); 9810 } else { 9811 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 9812 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 9813 9814 false_reg->umax_value = min(false_reg->umax_value, false_umax); 9815 true_reg->umin_value = max(true_reg->umin_value, true_umin); 9816 } 9817 break; 9818 } 9819 case BPF_JSGE: 9820 case BPF_JSGT: 9821 { 9822 if (is_jmp32) { 9823 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 9824 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 9825 9826 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 9827 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 9828 } else { 9829 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 9830 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 9831 9832 false_reg->smax_value = min(false_reg->smax_value, false_smax); 9833 true_reg->smin_value = max(true_reg->smin_value, true_smin); 9834 } 9835 break; 9836 } 9837 case BPF_JLE: 9838 case BPF_JLT: 9839 { 9840 if (is_jmp32) { 9841 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 9842 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 9843 9844 false_reg->u32_min_value = max(false_reg->u32_min_value, 9845 false_umin); 9846 true_reg->u32_max_value = min(true_reg->u32_max_value, 9847 true_umax); 9848 } else { 9849 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 9850 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 9851 9852 false_reg->umin_value = max(false_reg->umin_value, false_umin); 9853 true_reg->umax_value = min(true_reg->umax_value, true_umax); 9854 } 9855 break; 9856 } 9857 case BPF_JSLE: 9858 case BPF_JSLT: 9859 { 9860 if (is_jmp32) { 9861 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 9862 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 9863 9864 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 9865 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 9866 } else { 9867 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 9868 s64 true_smax = opcode == BPF_JSLT ? 
sval - 1 : sval; 9869 9870 false_reg->smin_value = max(false_reg->smin_value, false_smin); 9871 true_reg->smax_value = min(true_reg->smax_value, true_smax); 9872 } 9873 break; 9874 } 9875 default: 9876 return; 9877 } 9878 9879 if (is_jmp32) { 9880 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 9881 tnum_subreg(false_32off)); 9882 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 9883 tnum_subreg(true_32off)); 9884 __reg_combine_32_into_64(false_reg); 9885 __reg_combine_32_into_64(true_reg); 9886 } else { 9887 false_reg->var_off = false_64off; 9888 true_reg->var_off = true_64off; 9889 __reg_combine_64_into_32(false_reg); 9890 __reg_combine_64_into_32(true_reg); 9891 } 9892 } 9893 9894 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 9895 * the variable reg. 9896 */ 9897 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 9898 struct bpf_reg_state *false_reg, 9899 u64 val, u32 val32, 9900 u8 opcode, bool is_jmp32) 9901 { 9902 opcode = flip_opcode(opcode); 9903 /* This uses zero as "not present in table"; luckily the zero opcode, 9904 * BPF_JA, can't get here. 9905 */ 9906 if (opcode) 9907 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 9908 } 9909 9910 /* Regs are known to be equal, so intersect their min/max/var_off */ 9911 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 9912 struct bpf_reg_state *dst_reg) 9913 { 9914 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 9915 dst_reg->umin_value); 9916 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 9917 dst_reg->umax_value); 9918 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 9919 dst_reg->smin_value); 9920 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 9921 dst_reg->smax_value); 9922 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 9923 dst_reg->var_off); 9924 reg_bounds_sync(src_reg); 9925 reg_bounds_sync(dst_reg); 9926 } 9927 9928 static void reg_combine_min_max(struct bpf_reg_state *true_src, 9929 struct bpf_reg_state *true_dst, 9930 struct bpf_reg_state *false_src, 9931 struct bpf_reg_state *false_dst, 9932 u8 opcode) 9933 { 9934 switch (opcode) { 9935 case BPF_JEQ: 9936 __reg_combine_min_max(true_src, true_dst); 9937 break; 9938 case BPF_JNE: 9939 __reg_combine_min_max(false_src, false_dst); 9940 break; 9941 } 9942 } 9943 9944 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 9945 struct bpf_reg_state *reg, u32 id, 9946 bool is_null) 9947 { 9948 if (type_may_be_null(reg->type) && reg->id == id && 9949 !WARN_ON_ONCE(!reg->id)) { 9950 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 9951 !tnum_equals_const(reg->var_off, 0) || 9952 reg->off)) { 9953 /* Old offset (both fixed and variable parts) should 9954 * have been known-zero, because we don't allow pointer 9955 * arithmetic on pointers that might be NULL. If we 9956 * see this happening, don't convert the register. 9957 */ 9958 return; 9959 } 9960 if (is_null) { 9961 reg->type = SCALAR_VALUE; 9962 /* We don't need id and ref_obj_id from this point 9963 * onwards anymore, thus we should better reset it, 9964 * so that state pruning has chances to take effect. 9965 */ 9966 reg->id = 0; 9967 reg->ref_obj_id = 0; 9968 9969 return; 9970 } 9971 9972 mark_ptr_not_null_reg(reg); 9973 9974 if (!reg_may_point_to_spin_lock(reg)) { 9975 /* For not-NULL ptr, reg->ref_obj_id will be reset 9976 * in release_reference(). 9977 * 9978 * reg->id is still used by spin_lock ptr. 
Other 9979 * than spin_lock ptr type, reg->id can be reset. 9980 */ 9981 reg->id = 0; 9982 } 9983 } 9984 } 9985 9986 /* The logic is similar to find_good_pkt_pointers(), both could eventually 9987 * be folded together at some point. 9988 */ 9989 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 9990 bool is_null) 9991 { 9992 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 9993 struct bpf_reg_state *regs = state->regs, *reg; 9994 u32 ref_obj_id = regs[regno].ref_obj_id; 9995 u32 id = regs[regno].id; 9996 9997 if (ref_obj_id && ref_obj_id == id && is_null) 9998 /* regs[regno] is in the " == NULL" branch. 9999 * No one could have freed the reference state before 10000 * doing the NULL check. 10001 */ 10002 WARN_ON_ONCE(release_reference_state(state, id)); 10003 10004 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 10005 mark_ptr_or_null_reg(state, reg, id, is_null); 10006 })); 10007 } 10008 10009 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 10010 struct bpf_reg_state *dst_reg, 10011 struct bpf_reg_state *src_reg, 10012 struct bpf_verifier_state *this_branch, 10013 struct bpf_verifier_state *other_branch) 10014 { 10015 if (BPF_SRC(insn->code) != BPF_X) 10016 return false; 10017 10018 /* Pointers are always 64-bit. */ 10019 if (BPF_CLASS(insn->code) == BPF_JMP32) 10020 return false; 10021 10022 switch (BPF_OP(insn->code)) { 10023 case BPF_JGT: 10024 if ((dst_reg->type == PTR_TO_PACKET && 10025 src_reg->type == PTR_TO_PACKET_END) || 10026 (dst_reg->type == PTR_TO_PACKET_META && 10027 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 10028 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 10029 find_good_pkt_pointers(this_branch, dst_reg, 10030 dst_reg->type, false); 10031 mark_pkt_end(other_branch, insn->dst_reg, true); 10032 } else if ((dst_reg->type == PTR_TO_PACKET_END && 10033 src_reg->type == PTR_TO_PACKET) || 10034 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 10035 src_reg->type == PTR_TO_PACKET_META)) { 10036 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 10037 find_good_pkt_pointers(other_branch, src_reg, 10038 src_reg->type, true); 10039 mark_pkt_end(this_branch, insn->src_reg, false); 10040 } else { 10041 return false; 10042 } 10043 break; 10044 case BPF_JLT: 10045 if ((dst_reg->type == PTR_TO_PACKET && 10046 src_reg->type == PTR_TO_PACKET_END) || 10047 (dst_reg->type == PTR_TO_PACKET_META && 10048 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 10049 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 10050 find_good_pkt_pointers(other_branch, dst_reg, 10051 dst_reg->type, true); 10052 mark_pkt_end(this_branch, insn->dst_reg, false); 10053 } else if ((dst_reg->type == PTR_TO_PACKET_END && 10054 src_reg->type == PTR_TO_PACKET) || 10055 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 10056 src_reg->type == PTR_TO_PACKET_META)) { 10057 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 10058 find_good_pkt_pointers(this_branch, src_reg, 10059 src_reg->type, false); 10060 mark_pkt_end(other_branch, insn->src_reg, true); 10061 } else { 10062 return false; 10063 } 10064 break; 10065 case BPF_JGE: 10066 if ((dst_reg->type == PTR_TO_PACKET && 10067 src_reg->type == PTR_TO_PACKET_END) || 10068 (dst_reg->type == PTR_TO_PACKET_META && 10069 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 10070 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 10071 find_good_pkt_pointers(this_branch, dst_reg, 10072 dst_reg->type, true); 10073 mark_pkt_end(other_branch, insn->dst_reg, false); 10074 } else if ((dst_reg->type == 
PTR_TO_PACKET_END && 10075 src_reg->type == PTR_TO_PACKET) || 10076 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 10077 src_reg->type == PTR_TO_PACKET_META)) { 10078 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 10079 find_good_pkt_pointers(other_branch, src_reg, 10080 src_reg->type, false); 10081 mark_pkt_end(this_branch, insn->src_reg, true); 10082 } else { 10083 return false; 10084 } 10085 break; 10086 case BPF_JLE: 10087 if ((dst_reg->type == PTR_TO_PACKET && 10088 src_reg->type == PTR_TO_PACKET_END) || 10089 (dst_reg->type == PTR_TO_PACKET_META && 10090 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 10091 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 10092 find_good_pkt_pointers(other_branch, dst_reg, 10093 dst_reg->type, false); 10094 mark_pkt_end(this_branch, insn->dst_reg, true); 10095 } else if ((dst_reg->type == PTR_TO_PACKET_END && 10096 src_reg->type == PTR_TO_PACKET) || 10097 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 10098 src_reg->type == PTR_TO_PACKET_META)) { 10099 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 10100 find_good_pkt_pointers(this_branch, src_reg, 10101 src_reg->type, true); 10102 mark_pkt_end(other_branch, insn->src_reg, false); 10103 } else { 10104 return false; 10105 } 10106 break; 10107 default: 10108 return false; 10109 } 10110 10111 return true; 10112 } 10113 10114 static void find_equal_scalars(struct bpf_verifier_state *vstate, 10115 struct bpf_reg_state *known_reg) 10116 { 10117 struct bpf_func_state *state; 10118 struct bpf_reg_state *reg; 10119 10120 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 10121 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 10122 *reg = *known_reg; 10123 })); 10124 } 10125 10126 static int check_cond_jmp_op(struct bpf_verifier_env *env, 10127 struct bpf_insn *insn, int *insn_idx) 10128 { 10129 struct bpf_verifier_state *this_branch = env->cur_state; 10130 struct bpf_verifier_state *other_branch; 10131 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 10132 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 10133 u8 opcode = BPF_OP(insn->code); 10134 bool is_jmp32; 10135 int pred = -1; 10136 int err; 10137 10138 /* Only conditional jumps are expected to reach here. 
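/* Illustrative aside: a branch that is_branch32_taken() can decide from the
 * 32-bit subregister bounds alone.  The values and jump offset are
 * placeholders.  After the 32-bit move, the low half of r1 is the constant
 * 0xffffffff (negative as s32) while the zero-extended 64-bit value is
 * positive, so the JMP32 test below is known taken even though the same
 * comparison as a 64-bit BPF_JMP would be known not-taken.
 */
static const struct bpf_insn jmp32_known_taken_example[] = {
	BPF_MOV32_IMM(BPF_REG_1, -1),			/* w1 = 0xffffffff         */
	BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1),	/* "w1 s< 0": always taken */
};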
*/ 10139 if (opcode == BPF_JA || opcode > BPF_JSLE) { 10140 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 10141 return -EINVAL; 10142 } 10143 10144 if (BPF_SRC(insn->code) == BPF_X) { 10145 if (insn->imm != 0) { 10146 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 10147 return -EINVAL; 10148 } 10149 10150 /* check src1 operand */ 10151 err = check_reg_arg(env, insn->src_reg, SRC_OP); 10152 if (err) 10153 return err; 10154 10155 if (is_pointer_value(env, insn->src_reg)) { 10156 verbose(env, "R%d pointer comparison prohibited\n", 10157 insn->src_reg); 10158 return -EACCES; 10159 } 10160 src_reg = ®s[insn->src_reg]; 10161 } else { 10162 if (insn->src_reg != BPF_REG_0) { 10163 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 10164 return -EINVAL; 10165 } 10166 } 10167 10168 /* check src2 operand */ 10169 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 10170 if (err) 10171 return err; 10172 10173 dst_reg = ®s[insn->dst_reg]; 10174 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 10175 10176 if (BPF_SRC(insn->code) == BPF_K) { 10177 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); 10178 } else if (src_reg->type == SCALAR_VALUE && 10179 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { 10180 pred = is_branch_taken(dst_reg, 10181 tnum_subreg(src_reg->var_off).value, 10182 opcode, 10183 is_jmp32); 10184 } else if (src_reg->type == SCALAR_VALUE && 10185 !is_jmp32 && tnum_is_const(src_reg->var_off)) { 10186 pred = is_branch_taken(dst_reg, 10187 src_reg->var_off.value, 10188 opcode, 10189 is_jmp32); 10190 } else if (reg_is_pkt_pointer_any(dst_reg) && 10191 reg_is_pkt_pointer_any(src_reg) && 10192 !is_jmp32) { 10193 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode); 10194 } 10195 10196 if (pred >= 0) { 10197 /* If we get here with a dst_reg pointer type it is because 10198 * above is_branch_taken() special cased the 0 comparison. 10199 */ 10200 if (!__is_pointer_value(false, dst_reg)) 10201 err = mark_chain_precision(env, insn->dst_reg); 10202 if (BPF_SRC(insn->code) == BPF_X && !err && 10203 !__is_pointer_value(false, src_reg)) 10204 err = mark_chain_precision(env, insn->src_reg); 10205 if (err) 10206 return err; 10207 } 10208 10209 if (pred == 1) { 10210 /* Only follow the goto, ignore fall-through. If needed, push 10211 * the fall-through branch for simulation under speculative 10212 * execution. 10213 */ 10214 if (!env->bypass_spec_v1 && 10215 !sanitize_speculative_path(env, insn, *insn_idx + 1, 10216 *insn_idx)) 10217 return -EFAULT; 10218 *insn_idx += insn->off; 10219 return 0; 10220 } else if (pred == 0) { 10221 /* Only follow the fall-through branch, since that's where the 10222 * program will go. If needed, push the goto branch for 10223 * simulation under speculative execution. 10224 */ 10225 if (!env->bypass_spec_v1 && 10226 !sanitize_speculative_path(env, insn, 10227 *insn_idx + insn->off + 1, 10228 *insn_idx)) 10229 return -EFAULT; 10230 return 0; 10231 } 10232 10233 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 10234 false); 10235 if (!other_branch) 10236 return -EFAULT; 10237 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 10238 10239 /* detect if we are comparing against a constant value so we can adjust 10240 * our min/max values for our dst register. 10241 * this is only legit if both are scalars (or pointers to the same 10242 * object, I suppose, but we don't support that right now), because 10243 * otherwise the different base pointers mean the offsets aren't 10244 * comparable. 
10245 */
10246 	if (BPF_SRC(insn->code) == BPF_X) {
10247 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
10248 
10249 		if (dst_reg->type == SCALAR_VALUE &&
10250 		    src_reg->type == SCALAR_VALUE) {
10251 			if (tnum_is_const(src_reg->var_off) ||
10252 			    (is_jmp32 &&
10253 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
10254 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
10255 						dst_reg,
10256 						src_reg->var_off.value,
10257 						tnum_subreg(src_reg->var_off).value,
10258 						opcode, is_jmp32);
10259 			else if (tnum_is_const(dst_reg->var_off) ||
10260 				 (is_jmp32 &&
10261 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
10262 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
10263 						    src_reg,
10264 						    dst_reg->var_off.value,
10265 						    tnum_subreg(dst_reg->var_off).value,
10266 						    opcode, is_jmp32);
10267 			else if (!is_jmp32 &&
10268 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
10269 				/* Comparing for equality, we can combine knowledge */
10270 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
10271 						    &other_branch_regs[insn->dst_reg],
10272 						    src_reg, dst_reg, opcode);
10273 			if (src_reg->id &&
10274 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
10275 				find_equal_scalars(this_branch, src_reg);
10276 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
10277 			}
10278 
10279 		}
10280 	} else if (dst_reg->type == SCALAR_VALUE) {
10281 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
10282 				dst_reg, insn->imm, (u32)insn->imm,
10283 				opcode, is_jmp32);
10284 	}
10285 
10286 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
10287 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
10288 		find_equal_scalars(this_branch, dst_reg);
10289 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
10290 	}
10291 
10292 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
10293 	 * NOTE: these optimizations below are related to pointer comparison,
10294 	 * which will never be JMP32.
10295 	 */
10296 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
10297 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
10298 	    type_may_be_null(dst_reg->type)) {
10299 		/* Mark all identical registers in each branch as either
10300 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
10301 */ 10302 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 10303 opcode == BPF_JNE); 10304 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 10305 opcode == BPF_JEQ); 10306 } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], 10307 this_branch, other_branch) && 10308 is_pointer_value(env, insn->dst_reg)) { 10309 verbose(env, "R%d pointer comparison prohibited\n", 10310 insn->dst_reg); 10311 return -EACCES; 10312 } 10313 if (env->log.level & BPF_LOG_LEVEL) 10314 print_insn_state(env, this_branch->frame[this_branch->curframe]); 10315 return 0; 10316 } 10317 10318 /* verify BPF_LD_IMM64 instruction */ 10319 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 10320 { 10321 struct bpf_insn_aux_data *aux = cur_aux(env); 10322 struct bpf_reg_state *regs = cur_regs(env); 10323 struct bpf_reg_state *dst_reg; 10324 struct bpf_map *map; 10325 int err; 10326 10327 if (BPF_SIZE(insn->code) != BPF_DW) { 10328 verbose(env, "invalid BPF_LD_IMM insn\n"); 10329 return -EINVAL; 10330 } 10331 if (insn->off != 0) { 10332 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 10333 return -EINVAL; 10334 } 10335 10336 err = check_reg_arg(env, insn->dst_reg, DST_OP); 10337 if (err) 10338 return err; 10339 10340 dst_reg = ®s[insn->dst_reg]; 10341 if (insn->src_reg == 0) { 10342 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 10343 10344 dst_reg->type = SCALAR_VALUE; 10345 __mark_reg_known(®s[insn->dst_reg], imm); 10346 return 0; 10347 } 10348 10349 /* All special src_reg cases are listed below. From this point onwards 10350 * we either succeed and assign a corresponding dst_reg->type after 10351 * zeroing the offset, or fail and reject the program. 10352 */ 10353 mark_reg_known_zero(env, regs, insn->dst_reg); 10354 10355 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 10356 dst_reg->type = aux->btf_var.reg_type; 10357 switch (base_type(dst_reg->type)) { 10358 case PTR_TO_MEM: 10359 dst_reg->mem_size = aux->btf_var.mem_size; 10360 break; 10361 case PTR_TO_BTF_ID: 10362 dst_reg->btf = aux->btf_var.btf; 10363 dst_reg->btf_id = aux->btf_var.btf_id; 10364 break; 10365 default: 10366 verbose(env, "bpf verifier is misconfigured\n"); 10367 return -EFAULT; 10368 } 10369 return 0; 10370 } 10371 10372 if (insn->src_reg == BPF_PSEUDO_FUNC) { 10373 struct bpf_prog_aux *aux = env->prog->aux; 10374 u32 subprogno = find_subprog(env, 10375 env->insn_idx + insn->imm + 1); 10376 10377 if (!aux->func_info) { 10378 verbose(env, "missing btf func_info\n"); 10379 return -EINVAL; 10380 } 10381 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { 10382 verbose(env, "callback function not static\n"); 10383 return -EINVAL; 10384 } 10385 10386 dst_reg->type = PTR_TO_FUNC; 10387 dst_reg->subprogno = subprogno; 10388 return 0; 10389 } 10390 10391 map = env->used_maps[aux->map_index]; 10392 dst_reg->map_ptr = map; 10393 10394 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || 10395 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { 10396 dst_reg->type = PTR_TO_MAP_VALUE; 10397 dst_reg->off = aux->map_off; 10398 if (map_value_has_spin_lock(map)) 10399 dst_reg->id = ++env->id_gen; 10400 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || 10401 insn->src_reg == BPF_PSEUDO_MAP_IDX) { 10402 dst_reg->type = CONST_PTR_TO_MAP; 10403 } else { 10404 verbose(env, "bpf verifier is misconfigured\n"); 10405 return -EINVAL; 10406 } 10407 10408 return 0; 10409 } 10410 10411 static bool may_access_skb(enum bpf_prog_type type) 10412 { 10413 switch (type) { 10414 case BPF_PROG_TYPE_SOCKET_FILTER: 10415 case 
BPF_PROG_TYPE_SCHED_CLS: 10416 case BPF_PROG_TYPE_SCHED_ACT: 10417 return true; 10418 default: 10419 return false; 10420 } 10421 } 10422 10423 /* verify safety of LD_ABS|LD_IND instructions: 10424 * - they can only appear in the programs where ctx == skb 10425 * - since they are wrappers of function calls, they scratch R1-R5 registers, 10426 * preserve R6-R9, and store return value into R0 10427 * 10428 * Implicit input: 10429 * ctx == skb == R6 == CTX 10430 * 10431 * Explicit input: 10432 * SRC == any register 10433 * IMM == 32-bit immediate 10434 * 10435 * Output: 10436 * R0 - 8/16/32-bit skb data converted to cpu endianness 10437 */ 10438 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 10439 { 10440 struct bpf_reg_state *regs = cur_regs(env); 10441 static const int ctx_reg = BPF_REG_6; 10442 u8 mode = BPF_MODE(insn->code); 10443 int i, err; 10444 10445 if (!may_access_skb(resolve_prog_type(env->prog))) { 10446 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 10447 return -EINVAL; 10448 } 10449 10450 if (!env->ops->gen_ld_abs) { 10451 verbose(env, "bpf verifier is misconfigured\n"); 10452 return -EINVAL; 10453 } 10454 10455 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 10456 BPF_SIZE(insn->code) == BPF_DW || 10457 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 10458 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 10459 return -EINVAL; 10460 } 10461 10462 /* check whether implicit source operand (register R6) is readable */ 10463 err = check_reg_arg(env, ctx_reg, SRC_OP); 10464 if (err) 10465 return err; 10466 10467 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 10468 * gen_ld_abs() may terminate the program at runtime, leading to 10469 * reference leak. 10470 */ 10471 err = check_reference_leak(env); 10472 if (err) { 10473 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 10474 return err; 10475 } 10476 10477 if (env->cur_state->active_spin_lock) { 10478 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 10479 return -EINVAL; 10480 } 10481 10482 if (regs[ctx_reg].type != PTR_TO_CTX) { 10483 verbose(env, 10484 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 10485 return -EINVAL; 10486 } 10487 10488 if (mode == BPF_IND) { 10489 /* check explicit source operand */ 10490 err = check_reg_arg(env, insn->src_reg, SRC_OP); 10491 if (err) 10492 return err; 10493 } 10494 10495 err = check_ptr_off_reg(env, ®s[ctx_reg], ctx_reg); 10496 if (err < 0) 10497 return err; 10498 10499 /* reset caller saved regs to unreadable */ 10500 for (i = 0; i < CALLER_SAVED_REGS; i++) { 10501 mark_reg_not_init(env, regs, caller_saved[i]); 10502 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 10503 } 10504 10505 /* mark destination R0 register as readable, since it contains 10506 * the value fetched from the packet. 10507 * Already marked as written above. 10508 */ 10509 mark_reg_unknown(env, regs, BPF_REG_0); 10510 /* ld_abs load up to 32-bit skb data. 
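/* Illustrative aside: the kind of classic socket-filter snippet this function
 * validates.  The offset 23 (the IPv4 protocol byte in a plain Ethernet
 * frame) is just an example value; the skb context has to be moved into R6
 * first, since that is the implicit LD_ABS input described above.
 */
static const struct bpf_insn ld_abs_example[] = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* r1 = ctx (skb) at entry; LD_ABS wants it in r6 */
	BPF_LD_ABS(BPF_B, 23),			/* r0 = one byte of skb data at offset 23         */
	BPF_EXIT_INSN(),			/* return r0                                      */
};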
*/ 10511 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 10512 return 0; 10513 } 10514 10515 static int check_return_code(struct bpf_verifier_env *env) 10516 { 10517 struct tnum enforce_attach_type_range = tnum_unknown; 10518 const struct bpf_prog *prog = env->prog; 10519 struct bpf_reg_state *reg; 10520 struct tnum range = tnum_range(0, 1); 10521 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 10522 int err; 10523 struct bpf_func_state *frame = env->cur_state->frame[0]; 10524 const bool is_subprog = frame->subprogno; 10525 10526 /* LSM and struct_ops func-ptr's return type could be "void" */ 10527 if (!is_subprog) { 10528 switch (prog_type) { 10529 case BPF_PROG_TYPE_LSM: 10530 if (prog->expected_attach_type == BPF_LSM_CGROUP) 10531 /* See below, can be 0 or 0-1 depending on hook. */ 10532 break; 10533 fallthrough; 10534 case BPF_PROG_TYPE_STRUCT_OPS: 10535 if (!prog->aux->attach_func_proto->type) 10536 return 0; 10537 break; 10538 default: 10539 break; 10540 } 10541 } 10542 10543 /* eBPF calling convention is such that R0 is used 10544 * to return the value from eBPF program. 10545 * Make sure that it's readable at this time 10546 * of bpf_exit, which means that program wrote 10547 * something into it earlier 10548 */ 10549 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 10550 if (err) 10551 return err; 10552 10553 if (is_pointer_value(env, BPF_REG_0)) { 10554 verbose(env, "R0 leaks addr as return value\n"); 10555 return -EACCES; 10556 } 10557 10558 reg = cur_regs(env) + BPF_REG_0; 10559 10560 if (frame->in_async_callback_fn) { 10561 /* enforce return zero from async callbacks like timer */ 10562 if (reg->type != SCALAR_VALUE) { 10563 verbose(env, "In async callback the register R0 is not a known value (%s)\n", 10564 reg_type_str(env, reg->type)); 10565 return -EINVAL; 10566 } 10567 10568 if (!tnum_in(tnum_const(0), reg->var_off)) { 10569 verbose_invalid_scalar(env, reg, &range, "async callback", "R0"); 10570 return -EINVAL; 10571 } 10572 return 0; 10573 } 10574 10575 if (is_subprog) { 10576 if (reg->type != SCALAR_VALUE) { 10577 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", 10578 reg_type_str(env, reg->type)); 10579 return -EINVAL; 10580 } 10581 return 0; 10582 } 10583 10584 switch (prog_type) { 10585 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 10586 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 10587 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 10588 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 10589 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 10590 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 10591 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) 10592 range = tnum_range(1, 1); 10593 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || 10594 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) 10595 range = tnum_range(0, 3); 10596 break; 10597 case BPF_PROG_TYPE_CGROUP_SKB: 10598 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 10599 range = tnum_range(0, 3); 10600 enforce_attach_type_range = tnum_range(2, 3); 10601 } 10602 break; 10603 case BPF_PROG_TYPE_CGROUP_SOCK: 10604 case BPF_PROG_TYPE_SOCK_OPS: 10605 case BPF_PROG_TYPE_CGROUP_DEVICE: 10606 case BPF_PROG_TYPE_CGROUP_SYSCTL: 10607 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 10608 break; 10609 case BPF_PROG_TYPE_RAW_TRACEPOINT: 10610 if (!env->prog->aux->attach_btf_id) 10611 return 0; 10612 range = tnum_const(0); 10613 break; 10614 case 
BPF_PROG_TYPE_TRACING: 10615 switch (env->prog->expected_attach_type) { 10616 case BPF_TRACE_FENTRY: 10617 case BPF_TRACE_FEXIT: 10618 range = tnum_const(0); 10619 break; 10620 case BPF_TRACE_RAW_TP: 10621 case BPF_MODIFY_RETURN: 10622 return 0; 10623 case BPF_TRACE_ITER: 10624 break; 10625 default: 10626 return -ENOTSUPP; 10627 } 10628 break; 10629 case BPF_PROG_TYPE_SK_LOOKUP: 10630 range = tnum_range(SK_DROP, SK_PASS); 10631 break; 10632 10633 case BPF_PROG_TYPE_LSM: 10634 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { 10635 /* Regular BPF_PROG_TYPE_LSM programs can return 10636 * any value. 10637 */ 10638 return 0; 10639 } 10640 if (!env->prog->aux->attach_func_proto->type) { 10641 /* Make sure programs that attach to void 10642 * hooks don't try to modify return value. 10643 */ 10644 range = tnum_range(1, 1); 10645 } 10646 break; 10647 10648 case BPF_PROG_TYPE_EXT: 10649 /* freplace program can return anything as its return value 10650 * depends on the to-be-replaced kernel func or bpf program. 10651 */ 10652 default: 10653 return 0; 10654 } 10655 10656 if (reg->type != SCALAR_VALUE) { 10657 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 10658 reg_type_str(env, reg->type)); 10659 return -EINVAL; 10660 } 10661 10662 if (!tnum_in(range, reg->var_off)) { 10663 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); 10664 if (prog->expected_attach_type == BPF_LSM_CGROUP && 10665 prog_type == BPF_PROG_TYPE_LSM && 10666 !prog->aux->attach_func_proto->type) 10667 verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 10668 return -EINVAL; 10669 } 10670 10671 if (!tnum_is_unknown(enforce_attach_type_range) && 10672 tnum_in(enforce_attach_type_range, reg->var_off)) 10673 env->prog->enforce_expected_attach_type = 1; 10674 return 0; 10675 } 10676 10677 /* non-recursive DFS pseudo code 10678 * 1 procedure DFS-iterative(G,v): 10679 * 2 label v as discovered 10680 * 3 let S be a stack 10681 * 4 S.push(v) 10682 * 5 while S is not empty 10683 * 6 t <- S.pop() 10684 * 7 if t is what we're looking for: 10685 * 8 return t 10686 * 9 for all edges e in G.adjacentEdges(t) do 10687 * 10 if edge e is already labelled 10688 * 11 continue with the next edge 10689 * 12 w <- G.adjacentVertex(t,e) 10690 * 13 if vertex w is not discovered and not explored 10691 * 14 label e as tree-edge 10692 * 15 label w as discovered 10693 * 16 S.push(w) 10694 * 17 continue at 5 10695 * 18 else if vertex w is discovered 10696 * 19 label e as back-edge 10697 * 20 else 10698 * 21 // vertex w is explored 10699 * 22 label e as forward- or cross-edge 10700 * 23 label t as explored 10701 * 24 S.pop() 10702 * 10703 * convention: 10704 * 0x10 - discovered 10705 * 0x11 - discovered and fall-through edge labelled 10706 * 0x12 - discovered and fall-through and branch edges labelled 10707 * 0x20 - explored 10708 */ 10709 10710 enum { 10711 DISCOVERED = 0x10, 10712 EXPLORED = 0x20, 10713 FALLTHROUGH = 1, 10714 BRANCH = 2, 10715 }; 10716 10717 static u32 state_htab_size(struct bpf_verifier_env *env) 10718 { 10719 return env->prog->len; 10720 } 10721 10722 static struct bpf_verifier_state_list **explored_state( 10723 struct bpf_verifier_env *env, 10724 int idx) 10725 { 10726 struct bpf_verifier_state *cur = env->cur_state; 10727 struct bpf_func_state *state = cur->frame[cur->curframe]; 10728 10729 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 10730 } 10731 10732 static void init_explored_state(struct bpf_verifier_env *env, 
int idx) 10733 { 10734 env->insn_aux_data[idx].prune_point = true; 10735 } 10736 10737 enum { 10738 DONE_EXPLORING = 0, 10739 KEEP_EXPLORING = 1, 10740 }; 10741 10742 /* t, w, e - match pseudo-code above: 10743 * t - index of current instruction 10744 * w - next instruction 10745 * e - edge 10746 */ 10747 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 10748 bool loop_ok) 10749 { 10750 int *insn_stack = env->cfg.insn_stack; 10751 int *insn_state = env->cfg.insn_state; 10752 10753 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 10754 return DONE_EXPLORING; 10755 10756 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 10757 return DONE_EXPLORING; 10758 10759 if (w < 0 || w >= env->prog->len) { 10760 verbose_linfo(env, t, "%d: ", t); 10761 verbose(env, "jump out of range from insn %d to %d\n", t, w); 10762 return -EINVAL; 10763 } 10764 10765 if (e == BRANCH) 10766 /* mark branch target for state pruning */ 10767 init_explored_state(env, w); 10768 10769 if (insn_state[w] == 0) { 10770 /* tree-edge */ 10771 insn_state[t] = DISCOVERED | e; 10772 insn_state[w] = DISCOVERED; 10773 if (env->cfg.cur_stack >= env->prog->len) 10774 return -E2BIG; 10775 insn_stack[env->cfg.cur_stack++] = w; 10776 return KEEP_EXPLORING; 10777 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 10778 if (loop_ok && env->bpf_capable) 10779 return DONE_EXPLORING; 10780 verbose_linfo(env, t, "%d: ", t); 10781 verbose_linfo(env, w, "%d: ", w); 10782 verbose(env, "back-edge from insn %d to %d\n", t, w); 10783 return -EINVAL; 10784 } else if (insn_state[w] == EXPLORED) { 10785 /* forward- or cross-edge */ 10786 insn_state[t] = DISCOVERED | e; 10787 } else { 10788 verbose(env, "insn state internal bug\n"); 10789 return -EFAULT; 10790 } 10791 return DONE_EXPLORING; 10792 } 10793 10794 static int visit_func_call_insn(int t, int insn_cnt, 10795 struct bpf_insn *insns, 10796 struct bpf_verifier_env *env, 10797 bool visit_callee) 10798 { 10799 int ret; 10800 10801 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 10802 if (ret) 10803 return ret; 10804 10805 if (t + 1 < insn_cnt) 10806 init_explored_state(env, t + 1); 10807 if (visit_callee) { 10808 init_explored_state(env, t); 10809 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, 10810 /* It's ok to allow recursion from CFG point of 10811 * view. __check_func_call() will do the actual 10812 * check. 10813 */ 10814 bpf_pseudo_func(insns + t)); 10815 } 10816 return ret; 10817 } 10818 10819 /* Visits the instruction at index t and returns one of the following: 10820 * < 0 - an error occurred 10821 * DONE_EXPLORING - the instruction was fully explored 10822 * KEEP_EXPLORING - there is still work to be done before it is fully explored 10823 */ 10824 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env) 10825 { 10826 struct bpf_insn *insns = env->prog->insnsi; 10827 int ret; 10828 10829 if (bpf_pseudo_func(insns + t)) 10830 return visit_func_call_insn(t, insn_cnt, insns, env, true); 10831 10832 /* All non-branch instructions have a single fall-through edge. */ 10833 if (BPF_CLASS(insns[t].code) != BPF_JMP && 10834 BPF_CLASS(insns[t].code) != BPF_JMP32) 10835 return push_insn(t, t + 1, FALLTHROUGH, env, false); 10836 10837 switch (BPF_OP(insns[t].code)) { 10838 case BPF_EXIT: 10839 return DONE_EXPLORING; 10840 10841 case BPF_CALL: 10842 if (insns[t].imm == BPF_FUNC_timer_set_callback) 10843 /* Mark this call insn to trigger is_state_visited() check 10844 * before call itself is processed by __check_func_call(). 
10845 * Otherwise new async state will be pushed for further 10846 * exploration. 10847 */ 10848 init_explored_state(env, t); 10849 return visit_func_call_insn(t, insn_cnt, insns, env, 10850 insns[t].src_reg == BPF_PSEUDO_CALL); 10851 10852 case BPF_JA: 10853 if (BPF_SRC(insns[t].code) != BPF_K) 10854 return -EINVAL; 10855 10856 /* unconditional jump with single edge */ 10857 ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, 10858 true); 10859 if (ret) 10860 return ret; 10861 10862 /* unconditional jmp is not a good pruning point, 10863 * but it's marked, since backtracking needs 10864 * to record jmp history in is_state_visited(). 10865 */ 10866 init_explored_state(env, t + insns[t].off + 1); 10867 /* tell verifier to check for equivalent states 10868 * after every call and jump 10869 */ 10870 if (t + 1 < insn_cnt) 10871 init_explored_state(env, t + 1); 10872 10873 return ret; 10874 10875 default: 10876 /* conditional jump with two edges */ 10877 init_explored_state(env, t); 10878 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 10879 if (ret) 10880 return ret; 10881 10882 return push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 10883 } 10884 } 10885 10886 /* non-recursive depth-first-search to detect loops in BPF program 10887 * loop == back-edge in directed graph 10888 */ 10889 static int check_cfg(struct bpf_verifier_env *env) 10890 { 10891 int insn_cnt = env->prog->len; 10892 int *insn_stack, *insn_state; 10893 int ret = 0; 10894 int i; 10895 10896 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 10897 if (!insn_state) 10898 return -ENOMEM; 10899 10900 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 10901 if (!insn_stack) { 10902 kvfree(insn_state); 10903 return -ENOMEM; 10904 } 10905 10906 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 10907 insn_stack[0] = 0; /* 0 is the first instruction */ 10908 env->cfg.cur_stack = 1; 10909 10910 while (env->cfg.cur_stack > 0) { 10911 int t = insn_stack[env->cfg.cur_stack - 1]; 10912 10913 ret = visit_insn(t, insn_cnt, env); 10914 switch (ret) { 10915 case DONE_EXPLORING: 10916 insn_state[t] = EXPLORED; 10917 env->cfg.cur_stack--; 10918 break; 10919 case KEEP_EXPLORING: 10920 break; 10921 default: 10922 if (ret > 0) { 10923 verbose(env, "visit_insn internal bug\n"); 10924 ret = -EFAULT; 10925 } 10926 goto err_free; 10927 } 10928 } 10929 10930 if (env->cfg.cur_stack < 0) { 10931 verbose(env, "pop stack internal bug\n"); 10932 ret = -EFAULT; 10933 goto err_free; 10934 } 10935 10936 for (i = 0; i < insn_cnt; i++) { 10937 if (insn_state[i] != EXPLORED) { 10938 verbose(env, "unreachable insn %d\n", i); 10939 ret = -EINVAL; 10940 goto err_free; 10941 } 10942 } 10943 ret = 0; /* cfg looks good */ 10944 10945 err_free: 10946 kvfree(insn_state); 10947 kvfree(insn_stack); 10948 env->cfg.insn_state = env->cfg.insn_stack = NULL; 10949 return ret; 10950 } 10951 10952 static int check_abnormal_return(struct bpf_verifier_env *env) 10953 { 10954 int i; 10955 10956 for (i = 1; i < env->subprog_cnt; i++) { 10957 if (env->subprog_info[i].has_ld_abs) { 10958 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 10959 return -EINVAL; 10960 } 10961 if (env->subprog_info[i].has_tail_call) { 10962 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 10963 return -EINVAL; 10964 } 10965 } 10966 return 0; 10967 } 10968 10969 /* The minimum supported BTF func info size */ 10970 #define MIN_BPF_FUNCINFO_SIZE 8 10971 #define MAX_FUNCINFO_REC_SIZE 252 10972 
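/* Illustrative sketch (not normative) of what check_btf_func() below expects
 * from userspace. Each record is a uapi struct bpf_func_info:
 *
 *	struct bpf_func_info {
 *		__u32 insn_off;
 *		__u32 type_id;
 *	};
 *
 * insn_off must equal subprog_info[i].start (0 for the first record and
 * strictly increasing afterwards) and type_id must be the BTF id of a
 * KIND_FUNC describing subprog i. attr->func_info points to
 * attr->func_info_cnt such records, one per subprog, each
 * attr->func_info_rec_size bytes long; a newer userspace may pass records
 * larger than the kernel's struct as long as the extra tail bytes are zero
 * (checked via bpf_check_uarg_tail_zero() below), otherwise the expected
 * record size is reported back through uattr.
 */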
10973 static int check_btf_func(struct bpf_verifier_env *env, 10974 const union bpf_attr *attr, 10975 bpfptr_t uattr) 10976 { 10977 const struct btf_type *type, *func_proto, *ret_type; 10978 u32 i, nfuncs, urec_size, min_size; 10979 u32 krec_size = sizeof(struct bpf_func_info); 10980 struct bpf_func_info *krecord; 10981 struct bpf_func_info_aux *info_aux = NULL; 10982 struct bpf_prog *prog; 10983 const struct btf *btf; 10984 bpfptr_t urecord; 10985 u32 prev_offset = 0; 10986 bool scalar_return; 10987 int ret = -ENOMEM; 10988 10989 nfuncs = attr->func_info_cnt; 10990 if (!nfuncs) { 10991 if (check_abnormal_return(env)) 10992 return -EINVAL; 10993 return 0; 10994 } 10995 10996 if (nfuncs != env->subprog_cnt) { 10997 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 10998 return -EINVAL; 10999 } 11000 11001 urec_size = attr->func_info_rec_size; 11002 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 11003 urec_size > MAX_FUNCINFO_REC_SIZE || 11004 urec_size % sizeof(u32)) { 11005 verbose(env, "invalid func info rec size %u\n", urec_size); 11006 return -EINVAL; 11007 } 11008 11009 prog = env->prog; 11010 btf = prog->aux->btf; 11011 11012 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 11013 min_size = min_t(u32, krec_size, urec_size); 11014 11015 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 11016 if (!krecord) 11017 return -ENOMEM; 11018 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 11019 if (!info_aux) 11020 goto err_free; 11021 11022 for (i = 0; i < nfuncs; i++) { 11023 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 11024 if (ret) { 11025 if (ret == -E2BIG) { 11026 verbose(env, "nonzero tailing record in func info"); 11027 /* set the size kernel expects so loader can zero 11028 * out the rest of the record. 
11029 */ 11030 if (copy_to_bpfptr_offset(uattr, 11031 offsetof(union bpf_attr, func_info_rec_size), 11032 &min_size, sizeof(min_size))) 11033 ret = -EFAULT; 11034 } 11035 goto err_free; 11036 } 11037 11038 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { 11039 ret = -EFAULT; 11040 goto err_free; 11041 } 11042 11043 /* check insn_off */ 11044 ret = -EINVAL; 11045 if (i == 0) { 11046 if (krecord[i].insn_off) { 11047 verbose(env, 11048 "nonzero insn_off %u for the first func info record", 11049 krecord[i].insn_off); 11050 goto err_free; 11051 } 11052 } else if (krecord[i].insn_off <= prev_offset) { 11053 verbose(env, 11054 "same or smaller insn offset (%u) than previous func info record (%u)", 11055 krecord[i].insn_off, prev_offset); 11056 goto err_free; 11057 } 11058 11059 if (env->subprog_info[i].start != krecord[i].insn_off) { 11060 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 11061 goto err_free; 11062 } 11063 11064 /* check type_id */ 11065 type = btf_type_by_id(btf, krecord[i].type_id); 11066 if (!type || !btf_type_is_func(type)) { 11067 verbose(env, "invalid type id %d in func info", 11068 krecord[i].type_id); 11069 goto err_free; 11070 } 11071 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 11072 11073 func_proto = btf_type_by_id(btf, type->type); 11074 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) 11075 /* btf_func_check() already verified it during BTF load */ 11076 goto err_free; 11077 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); 11078 scalar_return = 11079 btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type); 11080 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { 11081 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); 11082 goto err_free; 11083 } 11084 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { 11085 verbose(env, "tail_call is only allowed in functions that return 'int'.\n"); 11086 goto err_free; 11087 } 11088 11089 prev_offset = krecord[i].insn_off; 11090 bpfptr_add(&urecord, urec_size); 11091 } 11092 11093 prog->aux->func_info = krecord; 11094 prog->aux->func_info_cnt = nfuncs; 11095 prog->aux->func_info_aux = info_aux; 11096 return 0; 11097 11098 err_free: 11099 kvfree(krecord); 11100 kfree(info_aux); 11101 return ret; 11102 } 11103 11104 static void adjust_btf_func(struct bpf_verifier_env *env) 11105 { 11106 struct bpf_prog_aux *aux = env->prog->aux; 11107 int i; 11108 11109 if (!aux->func_info) 11110 return; 11111 11112 for (i = 0; i < env->subprog_cnt; i++) 11113 aux->func_info[i].insn_off = env->subprog_info[i].start; 11114 } 11115 11116 #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col) 11117 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 11118 11119 static int check_btf_line(struct bpf_verifier_env *env, 11120 const union bpf_attr *attr, 11121 bpfptr_t uattr) 11122 { 11123 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 11124 struct bpf_subprog_info *sub; 11125 struct bpf_line_info *linfo; 11126 struct bpf_prog *prog; 11127 const struct btf *btf; 11128 bpfptr_t ulinfo; 11129 int err; 11130 11131 nr_linfo = attr->line_info_cnt; 11132 if (!nr_linfo) 11133 return 0; 11134 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) 11135 return -EINVAL; 11136 11137 rec_size = attr->line_info_rec_size; 11138 if (rec_size < MIN_BPF_LINEINFO_SIZE || 11139 rec_size > MAX_LINEINFO_REC_SIZE || 11140 rec_size & (sizeof(u32) - 1)) 11141 return -EINVAL; 11142 11143 /* Need to zero it in 
case the userspace may 11144 * pass in a smaller bpf_line_info object. 11145 */ 11146 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 11147 GFP_KERNEL | __GFP_NOWARN); 11148 if (!linfo) 11149 return -ENOMEM; 11150 11151 prog = env->prog; 11152 btf = prog->aux->btf; 11153 11154 s = 0; 11155 sub = env->subprog_info; 11156 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); 11157 expected_size = sizeof(struct bpf_line_info); 11158 ncopy = min_t(u32, expected_size, rec_size); 11159 for (i = 0; i < nr_linfo; i++) { 11160 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 11161 if (err) { 11162 if (err == -E2BIG) { 11163 verbose(env, "nonzero tailing record in line_info"); 11164 if (copy_to_bpfptr_offset(uattr, 11165 offsetof(union bpf_attr, line_info_rec_size), 11166 &expected_size, sizeof(expected_size))) 11167 err = -EFAULT; 11168 } 11169 goto err_free; 11170 } 11171 11172 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { 11173 err = -EFAULT; 11174 goto err_free; 11175 } 11176 11177 /* 11178 * Check insn_off to ensure 11179 * 1) strictly increasing AND 11180 * 2) bounded by prog->len 11181 * 11182 * The linfo[0].insn_off == 0 check logically falls into 11183 * the later "missing bpf_line_info for func..." case 11184 * because the first linfo[0].insn_off must be the 11185 * first sub also and the first sub must have 11186 * subprog_info[0].start == 0. 11187 */ 11188 if ((i && linfo[i].insn_off <= prev_offset) || 11189 linfo[i].insn_off >= prog->len) { 11190 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 11191 i, linfo[i].insn_off, prev_offset, 11192 prog->len); 11193 err = -EINVAL; 11194 goto err_free; 11195 } 11196 11197 if (!prog->insnsi[linfo[i].insn_off].code) { 11198 verbose(env, 11199 "Invalid insn code at line_info[%u].insn_off\n", 11200 i); 11201 err = -EINVAL; 11202 goto err_free; 11203 } 11204 11205 if (!btf_name_by_offset(btf, linfo[i].line_off) || 11206 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 11207 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 11208 err = -EINVAL; 11209 goto err_free; 11210 } 11211 11212 if (s != env->subprog_cnt) { 11213 if (linfo[i].insn_off == sub[s].start) { 11214 sub[s].linfo_idx = i; 11215 s++; 11216 } else if (sub[s].start < linfo[i].insn_off) { 11217 verbose(env, "missing bpf_line_info for func#%u\n", s); 11218 err = -EINVAL; 11219 goto err_free; 11220 } 11221 } 11222 11223 prev_offset = linfo[i].insn_off; 11224 bpfptr_add(&ulinfo, rec_size); 11225 } 11226 11227 if (s != env->subprog_cnt) { 11228 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 11229 env->subprog_cnt - s, s); 11230 err = -EINVAL; 11231 goto err_free; 11232 } 11233 11234 prog->aux->linfo = linfo; 11235 prog->aux->nr_linfo = nr_linfo; 11236 11237 return 0; 11238 11239 err_free: 11240 kvfree(linfo); 11241 return err; 11242 } 11243 11244 #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo) 11245 #define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE 11246 11247 static int check_core_relo(struct bpf_verifier_env *env, 11248 const union bpf_attr *attr, 11249 bpfptr_t uattr) 11250 { 11251 u32 i, nr_core_relo, ncopy, expected_size, rec_size; 11252 struct bpf_core_relo core_relo = {}; 11253 struct bpf_prog *prog = env->prog; 11254 const struct btf *btf = prog->aux->btf; 11255 struct bpf_core_ctx ctx = { 11256 .log = &env->log, 11257 .btf = btf, 11258 }; 11259 bpfptr_t u_core_relo; 11260 int err; 11261 11262 nr_core_relo = attr->core_relo_cnt; 11263 if (!nr_core_relo) 11264 return 
0; 11265 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo)) 11266 return -EINVAL; 11267 11268 rec_size = attr->core_relo_rec_size; 11269 if (rec_size < MIN_CORE_RELO_SIZE || 11270 rec_size > MAX_CORE_RELO_SIZE || 11271 rec_size % sizeof(u32)) 11272 return -EINVAL; 11273 11274 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); 11275 expected_size = sizeof(struct bpf_core_relo); 11276 ncopy = min_t(u32, expected_size, rec_size); 11277 11278 /* Unlike func_info and line_info, copy and apply each CO-RE 11279 * relocation record one at a time. 11280 */ 11281 for (i = 0; i < nr_core_relo; i++) { 11282 /* future proofing when sizeof(bpf_core_relo) changes */ 11283 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size); 11284 if (err) { 11285 if (err == -E2BIG) { 11286 verbose(env, "nonzero tailing record in core_relo"); 11287 if (copy_to_bpfptr_offset(uattr, 11288 offsetof(union bpf_attr, core_relo_rec_size), 11289 &expected_size, sizeof(expected_size))) 11290 err = -EFAULT; 11291 } 11292 break; 11293 } 11294 11295 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) { 11296 err = -EFAULT; 11297 break; 11298 } 11299 11300 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { 11301 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", 11302 i, core_relo.insn_off, prog->len); 11303 err = -EINVAL; 11304 break; 11305 } 11306 11307 err = bpf_core_apply(&ctx, &core_relo, i, 11308 &prog->insnsi[core_relo.insn_off / 8]); 11309 if (err) 11310 break; 11311 bpfptr_add(&u_core_relo, rec_size); 11312 } 11313 return err; 11314 } 11315 11316 static int check_btf_info(struct bpf_verifier_env *env, 11317 const union bpf_attr *attr, 11318 bpfptr_t uattr) 11319 { 11320 struct btf *btf; 11321 int err; 11322 11323 if (!attr->func_info_cnt && !attr->line_info_cnt) { 11324 if (check_abnormal_return(env)) 11325 return -EINVAL; 11326 return 0; 11327 } 11328 11329 btf = btf_get_by_fd(attr->prog_btf_fd); 11330 if (IS_ERR(btf)) 11331 return PTR_ERR(btf); 11332 if (btf_is_kernel(btf)) { 11333 btf_put(btf); 11334 return -EACCES; 11335 } 11336 env->prog->aux->btf = btf; 11337 11338 err = check_btf_func(env, attr, uattr); 11339 if (err) 11340 return err; 11341 11342 err = check_btf_line(env, attr, uattr); 11343 if (err) 11344 return err; 11345 11346 err = check_core_relo(env, attr, uattr); 11347 if (err) 11348 return err; 11349 11350 return 0; 11351 } 11352 11353 /* check %cur's range satisfies %old's */ 11354 static bool range_within(struct bpf_reg_state *old, 11355 struct bpf_reg_state *cur) 11356 { 11357 return old->umin_value <= cur->umin_value && 11358 old->umax_value >= cur->umax_value && 11359 old->smin_value <= cur->smin_value && 11360 old->smax_value >= cur->smax_value && 11361 old->u32_min_value <= cur->u32_min_value && 11362 old->u32_max_value >= cur->u32_max_value && 11363 old->s32_min_value <= cur->s32_min_value && 11364 old->s32_max_value >= cur->s32_max_value; 11365 } 11366 11367 /* If in the old state two registers had the same id, then they need to have 11368 * the same id in the new state as well. But that id could be different from 11369 * the old state, so we need to track the mapping from old to new ids. 11370 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 11371 * regs with old id 5 must also have new id 9 for the new state to be safe. But 11372 * regs with a different old id could still have new id 9, we don't care about 11373 * that. 11374 * So we look through our idmap to see if this old id has been seen before. 
If 11375 * so, we require the new id to match; otherwise, we add the id pair to the map. 11376 */ 11377 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) 11378 { 11379 unsigned int i; 11380 11381 for (i = 0; i < BPF_ID_MAP_SIZE; i++) { 11382 if (!idmap[i].old) { 11383 /* Reached an empty slot; haven't seen this id before */ 11384 idmap[i].old = old_id; 11385 idmap[i].cur = cur_id; 11386 return true; 11387 } 11388 if (idmap[i].old == old_id) 11389 return idmap[i].cur == cur_id; 11390 } 11391 /* We ran out of idmap slots, which should be impossible */ 11392 WARN_ON_ONCE(1); 11393 return false; 11394 } 11395 11396 static void clean_func_state(struct bpf_verifier_env *env, 11397 struct bpf_func_state *st) 11398 { 11399 enum bpf_reg_liveness live; 11400 int i, j; 11401 11402 for (i = 0; i < BPF_REG_FP; i++) { 11403 live = st->regs[i].live; 11404 /* liveness must not touch this register anymore */ 11405 st->regs[i].live |= REG_LIVE_DONE; 11406 if (!(live & REG_LIVE_READ)) 11407 /* since the register is unused, clear its state 11408 * to make further comparison simpler 11409 */ 11410 __mark_reg_not_init(env, &st->regs[i]); 11411 } 11412 11413 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 11414 live = st->stack[i].spilled_ptr.live; 11415 /* liveness must not touch this stack slot anymore */ 11416 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 11417 if (!(live & REG_LIVE_READ)) { 11418 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 11419 for (j = 0; j < BPF_REG_SIZE; j++) 11420 st->stack[i].slot_type[j] = STACK_INVALID; 11421 } 11422 } 11423 } 11424 11425 static void clean_verifier_state(struct bpf_verifier_env *env, 11426 struct bpf_verifier_state *st) 11427 { 11428 int i; 11429 11430 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 11431 /* all regs in this state in all frames were already marked */ 11432 return; 11433 11434 for (i = 0; i <= st->curframe; i++) 11435 clean_func_state(env, st->frame[i]); 11436 } 11437 11438 /* the parentage chains form a tree. 11439 * the verifier states are added to state lists at given insn and 11440 * pushed into state stack for future exploration. 11441 * when the verifier reaches bpf_exit insn some of the verifer states 11442 * stored in the state lists have their final liveness state already, 11443 * but a lot of states will get revised from liveness point of view when 11444 * the verifier explores other branches. 11445 * Example: 11446 * 1: r0 = 1 11447 * 2: if r1 == 100 goto pc+1 11448 * 3: r0 = 2 11449 * 4: exit 11450 * when the verifier reaches exit insn the register r0 in the state list of 11451 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 11452 * of insn 2 and goes exploring further. At the insn 4 it will walk the 11453 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 11454 * 11455 * Since the verifier pushes the branch states as it sees them while exploring 11456 * the program the condition of walking the branch instruction for the second 11457 * time means that all states below this branch were already explored and 11458 * their final liveness marks are already propagated. 11459 * Hence when the verifier completes the search of state list in is_state_visited() 11460 * we can call this clean_live_states() function to mark all liveness states 11461 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 11462 * will not be used. 
11463 * This function also clears the registers and stack for states that !READ 11464 * to simplify state merging. 11465 * 11466 * Important note here that walking the same branch instruction in the callee 11467 * doesn't meant that the states are DONE. The verifier has to compare 11468 * the callsites 11469 */ 11470 static void clean_live_states(struct bpf_verifier_env *env, int insn, 11471 struct bpf_verifier_state *cur) 11472 { 11473 struct bpf_verifier_state_list *sl; 11474 int i; 11475 11476 sl = *explored_state(env, insn); 11477 while (sl) { 11478 if (sl->state.branches) 11479 goto next; 11480 if (sl->state.insn_idx != insn || 11481 sl->state.curframe != cur->curframe) 11482 goto next; 11483 for (i = 0; i <= cur->curframe; i++) 11484 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 11485 goto next; 11486 clean_verifier_state(env, &sl->state); 11487 next: 11488 sl = sl->next; 11489 } 11490 } 11491 11492 /* Returns true if (rold safe implies rcur safe) */ 11493 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, 11494 struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) 11495 { 11496 bool equal; 11497 11498 if (!(rold->live & REG_LIVE_READ)) 11499 /* explored state didn't use this */ 11500 return true; 11501 11502 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; 11503 11504 if (rold->type == PTR_TO_STACK) 11505 /* two stack pointers are equal only if they're pointing to 11506 * the same stack frame, since fp-8 in foo != fp-8 in bar 11507 */ 11508 return equal && rold->frameno == rcur->frameno; 11509 11510 if (equal) 11511 return true; 11512 11513 if (rold->type == NOT_INIT) 11514 /* explored state can't have used this */ 11515 return true; 11516 if (rcur->type == NOT_INIT) 11517 return false; 11518 switch (base_type(rold->type)) { 11519 case SCALAR_VALUE: 11520 if (env->explore_alu_limits) 11521 return false; 11522 if (rcur->type == SCALAR_VALUE) { 11523 if (!rold->precise && !rcur->precise) 11524 return true; 11525 /* new val must satisfy old val knowledge */ 11526 return range_within(rold, rcur) && 11527 tnum_in(rold->var_off, rcur->var_off); 11528 } else { 11529 /* We're trying to use a pointer in place of a scalar. 11530 * Even if the scalar was unbounded, this could lead to 11531 * pointer leaks because scalars are allowed to leak 11532 * while pointers are not. We could make this safe in 11533 * special cases if root is calling us, but it's 11534 * probably not worth the hassle. 11535 */ 11536 return false; 11537 } 11538 case PTR_TO_MAP_KEY: 11539 case PTR_TO_MAP_VALUE: 11540 /* a PTR_TO_MAP_VALUE could be safe to use as a 11541 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 11542 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 11543 * checked, doing so could have affected others with the same 11544 * id, and we can't check for that because we lost the id when 11545 * we converted to a PTR_TO_MAP_VALUE. 11546 */ 11547 if (type_may_be_null(rold->type)) { 11548 if (!type_may_be_null(rcur->type)) 11549 return false; 11550 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 11551 return false; 11552 /* Check our ids match any regs they're supposed to */ 11553 return check_ids(rold->id, rcur->id, idmap); 11554 } 11555 11556 /* If the new min/max/var_off satisfy the old ones and 11557 * everything else matches, we are OK. 
11558 * 'id' is not compared, since it's only used for maps with 11559 * bpf_spin_lock inside map element and in such cases if 11560 * the rest of the prog is valid for one map element then 11561 * it's valid for all map elements regardless of the key 11562 * used in bpf_map_lookup() 11563 */ 11564 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 11565 range_within(rold, rcur) && 11566 tnum_in(rold->var_off, rcur->var_off); 11567 case PTR_TO_PACKET_META: 11568 case PTR_TO_PACKET: 11569 if (rcur->type != rold->type) 11570 return false; 11571 /* We must have at least as much range as the old ptr 11572 * did, so that any accesses which were safe before are 11573 * still safe. This is true even if old range < old off, 11574 * since someone could have accessed through (ptr - k), or 11575 * even done ptr -= k in a register, to get a safe access. 11576 */ 11577 if (rold->range > rcur->range) 11578 return false; 11579 /* If the offsets don't match, we can't trust our alignment; 11580 * nor can we be sure that we won't fall out of range. 11581 */ 11582 if (rold->off != rcur->off) 11583 return false; 11584 /* id relations must be preserved */ 11585 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 11586 return false; 11587 /* new val must satisfy old val knowledge */ 11588 return range_within(rold, rcur) && 11589 tnum_in(rold->var_off, rcur->var_off); 11590 case PTR_TO_CTX: 11591 case CONST_PTR_TO_MAP: 11592 case PTR_TO_PACKET_END: 11593 case PTR_TO_FLOW_KEYS: 11594 case PTR_TO_SOCKET: 11595 case PTR_TO_SOCK_COMMON: 11596 case PTR_TO_TCP_SOCK: 11597 case PTR_TO_XDP_SOCK: 11598 /* Only valid matches are exact, which memcmp() above 11599 * would have accepted 11600 */ 11601 default: 11602 /* Don't know what's going on, just say it's not safe */ 11603 return false; 11604 } 11605 11606 /* Shouldn't get here; if we do, say it's not safe */ 11607 WARN_ON_ONCE(1); 11608 return false; 11609 } 11610 11611 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, 11612 struct bpf_func_state *cur, struct bpf_id_pair *idmap) 11613 { 11614 int i, spi; 11615 11616 /* walk slots of the explored stack and ignore any additional 11617 * slots in the current stack, since explored(safe) state 11618 * didn't use them 11619 */ 11620 for (i = 0; i < old->allocated_stack; i++) { 11621 spi = i / BPF_REG_SIZE; 11622 11623 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 11624 i += BPF_REG_SIZE - 1; 11625 /* explored state didn't use this */ 11626 continue; 11627 } 11628 11629 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 11630 continue; 11631 11632 /* explored stack has more populated slots than current stack 11633 * and these slots were used 11634 */ 11635 if (i >= cur->allocated_stack) 11636 return false; 11637 11638 /* if old state was safe with misc data in the stack 11639 * it will be safe with zero-initialized stack. 
11640 * The opposite is not true 11641 */ 11642 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 11643 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 11644 continue; 11645 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 11646 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 11647 /* Ex: old explored (safe) state has STACK_SPILL in 11648 * this stack slot, but current has STACK_MISC -> 11649 * this verifier states are not equivalent, 11650 * return false to continue verification of this path 11651 */ 11652 return false; 11653 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) 11654 continue; 11655 if (!is_spilled_reg(&old->stack[spi])) 11656 continue; 11657 if (!regsafe(env, &old->stack[spi].spilled_ptr, 11658 &cur->stack[spi].spilled_ptr, idmap)) 11659 /* when explored and current stack slot are both storing 11660 * spilled registers, check that stored pointers types 11661 * are the same as well. 11662 * Ex: explored safe path could have stored 11663 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 11664 * but current path has stored: 11665 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 11666 * such verifier states are not equivalent. 11667 * return false to continue verification of this path 11668 */ 11669 return false; 11670 } 11671 return true; 11672 } 11673 11674 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) 11675 { 11676 if (old->acquired_refs != cur->acquired_refs) 11677 return false; 11678 return !memcmp(old->refs, cur->refs, 11679 sizeof(*old->refs) * old->acquired_refs); 11680 } 11681 11682 /* compare two verifier states 11683 * 11684 * all states stored in state_list are known to be valid, since 11685 * verifier reached 'bpf_exit' instruction through them 11686 * 11687 * this function is called when verifier exploring different branches of 11688 * execution popped from the state stack. If it sees an old state that has 11689 * more strict register state and more strict stack state then this execution 11690 * branch doesn't need to be explored further, since verifier already 11691 * concluded that more strict state leads to valid finish. 11692 * 11693 * Therefore two states are equivalent if register state is more conservative 11694 * and explored stack state is more conservative than the current one. 11695 * Example: 11696 * explored current 11697 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 11698 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 11699 * 11700 * In other words if current stack state (one being explored) has more 11701 * valid slots than old one that already passed validation, it means 11702 * the verifier can stop exploring and conclude that current state is valid too 11703 * 11704 * Similarly with registers. 
If explored state has register type as invalid 11705 * whereas register type in current state is meaningful, it means that 11706 * the current state will reach 'bpf_exit' instruction safely 11707 */ 11708 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, 11709 struct bpf_func_state *cur) 11710 { 11711 int i; 11712 11713 memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); 11714 for (i = 0; i < MAX_BPF_REG; i++) 11715 if (!regsafe(env, &old->regs[i], &cur->regs[i], 11716 env->idmap_scratch)) 11717 return false; 11718 11719 if (!stacksafe(env, old, cur, env->idmap_scratch)) 11720 return false; 11721 11722 if (!refsafe(old, cur)) 11723 return false; 11724 11725 return true; 11726 } 11727 11728 static bool states_equal(struct bpf_verifier_env *env, 11729 struct bpf_verifier_state *old, 11730 struct bpf_verifier_state *cur) 11731 { 11732 int i; 11733 11734 if (old->curframe != cur->curframe) 11735 return false; 11736 11737 /* Verification state from speculative execution simulation 11738 * must never prune a non-speculative execution one. 11739 */ 11740 if (old->speculative && !cur->speculative) 11741 return false; 11742 11743 if (old->active_spin_lock != cur->active_spin_lock) 11744 return false; 11745 11746 /* for states to be equal callsites have to be the same 11747 * and all frame states need to be equivalent 11748 */ 11749 for (i = 0; i <= old->curframe; i++) { 11750 if (old->frame[i]->callsite != cur->frame[i]->callsite) 11751 return false; 11752 if (!func_states_equal(env, old->frame[i], cur->frame[i])) 11753 return false; 11754 } 11755 return true; 11756 } 11757 11758 /* Return 0 if no propagation happened. Return negative error code if error 11759 * happened. Otherwise, return the propagated bit. 11760 */ 11761 static int propagate_liveness_reg(struct bpf_verifier_env *env, 11762 struct bpf_reg_state *reg, 11763 struct bpf_reg_state *parent_reg) 11764 { 11765 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 11766 u8 flag = reg->live & REG_LIVE_READ; 11767 int err; 11768 11769 /* When comes here, read flags of PARENT_REG or REG could be any of 11770 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 11771 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 11772 */ 11773 if (parent_flag == REG_LIVE_READ64 || 11774 /* Or if there is no read flag from REG. */ 11775 !flag || 11776 /* Or if the read flag from REG is the same as PARENT_REG. */ 11777 parent_flag == flag) 11778 return 0; 11779 11780 err = mark_reg_read(env, reg, parent_reg, flag); 11781 if (err) 11782 return err; 11783 11784 return flag; 11785 } 11786 11787 /* A write screens off any subsequent reads; but write marks come from the 11788 * straight-line code between a state and its parent. When we arrive at an 11789 * equivalent state (jump target or such) we didn't arrive by the straight-line 11790 * code, so read marks in the state must propagate to the parent regardless 11791 * of the state's write marks. That's what 'parent == state->parent' comparison 11792 * in mark_reg_read() is for. 
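 *
 * A small illustration (a sketch, not an exhaustive description): suppose
 * the explored state at insn X was reached via "r6 = 0" right before X, so
 * its r6 carries a WRITTEN mark, and the continuation past X later read r6,
 * leaving a READ mark on that same state. When a new path reaches X with an
 * equivalent state whose r6 came untouched from its parent, the
 * continuation's read of r6 must still be propagated into the new path's
 * parent, even though the explored state's WRITTEN mark would screen it on
 * a normal parentage walk, because the new path never executed that
 * "r6 = 0".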
11793 */ 11794 static int propagate_liveness(struct bpf_verifier_env *env, 11795 const struct bpf_verifier_state *vstate, 11796 struct bpf_verifier_state *vparent) 11797 { 11798 struct bpf_reg_state *state_reg, *parent_reg; 11799 struct bpf_func_state *state, *parent; 11800 int i, frame, err = 0; 11801 11802 if (vparent->curframe != vstate->curframe) { 11803 WARN(1, "propagate_live: parent frame %d current frame %d\n", 11804 vparent->curframe, vstate->curframe); 11805 return -EFAULT; 11806 } 11807 /* Propagate read liveness of registers... */ 11808 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 11809 for (frame = 0; frame <= vstate->curframe; frame++) { 11810 parent = vparent->frame[frame]; 11811 state = vstate->frame[frame]; 11812 parent_reg = parent->regs; 11813 state_reg = state->regs; 11814 /* We don't need to worry about FP liveness, it's read-only */ 11815 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 11816 err = propagate_liveness_reg(env, &state_reg[i], 11817 &parent_reg[i]); 11818 if (err < 0) 11819 return err; 11820 if (err == REG_LIVE_READ64) 11821 mark_insn_zext(env, &parent_reg[i]); 11822 } 11823 11824 /* Propagate stack slots. */ 11825 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 11826 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 11827 parent_reg = &parent->stack[i].spilled_ptr; 11828 state_reg = &state->stack[i].spilled_ptr; 11829 err = propagate_liveness_reg(env, state_reg, 11830 parent_reg); 11831 if (err < 0) 11832 return err; 11833 } 11834 } 11835 return 0; 11836 } 11837 11838 /* find precise scalars in the previous equivalent state and 11839 * propagate them into the current state 11840 */ 11841 static int propagate_precision(struct bpf_verifier_env *env, 11842 const struct bpf_verifier_state *old) 11843 { 11844 struct bpf_reg_state *state_reg; 11845 struct bpf_func_state *state; 11846 int i, err = 0; 11847 11848 state = old->frame[old->curframe]; 11849 state_reg = state->regs; 11850 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 11851 if (state_reg->type != SCALAR_VALUE || 11852 !state_reg->precise) 11853 continue; 11854 if (env->log.level & BPF_LOG_LEVEL2) 11855 verbose(env, "propagating r%d\n", i); 11856 err = mark_chain_precision(env, i); 11857 if (err < 0) 11858 return err; 11859 } 11860 11861 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 11862 if (!is_spilled_reg(&state->stack[i])) 11863 continue; 11864 state_reg = &state->stack[i].spilled_ptr; 11865 if (state_reg->type != SCALAR_VALUE || 11866 !state_reg->precise) 11867 continue; 11868 if (env->log.level & BPF_LOG_LEVEL2) 11869 verbose(env, "propagating fp%d\n", 11870 (-i - 1) * BPF_REG_SIZE); 11871 err = mark_chain_precision_stack(env, i); 11872 if (err < 0) 11873 return err; 11874 } 11875 return 0; 11876 } 11877 11878 static bool states_maybe_looping(struct bpf_verifier_state *old, 11879 struct bpf_verifier_state *cur) 11880 { 11881 struct bpf_func_state *fold, *fcur; 11882 int i, fr = cur->curframe; 11883 11884 if (old->curframe != fr) 11885 return false; 11886 11887 fold = old->frame[fr]; 11888 fcur = cur->frame[fr]; 11889 for (i = 0; i < MAX_BPF_REG; i++) 11890 if (memcmp(&fold->regs[i], &fcur->regs[i], 11891 offsetof(struct bpf_reg_state, parent))) 11892 return false; 11893 return true; 11894 } 11895 11896 11897 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 11898 { 11899 struct bpf_verifier_state_list *new_sl; 11900 struct bpf_verifier_state_list *sl, **pprev; 11901 struct bpf_verifier_state *cur = env->cur_state, *new; 
11902 int i, j, err, states_cnt = 0; 11903 bool add_new_state = env->test_state_freq ? true : false; 11904 11905 cur->last_insn_idx = env->prev_insn_idx; 11906 if (!env->insn_aux_data[insn_idx].prune_point) 11907 /* this 'insn_idx' instruction wasn't marked, so we will not 11908 * be doing state search here 11909 */ 11910 return 0; 11911 11912 /* bpf progs typically have pruning point every 4 instructions 11913 * http://vger.kernel.org/bpfconf2019.html#session-1 11914 * Do not add new state for future pruning if the verifier hasn't seen 11915 * at least 2 jumps and at least 8 instructions. 11916 * This heuristics helps decrease 'total_states' and 'peak_states' metric. 11917 * In tests that amounts to up to 50% reduction into total verifier 11918 * memory consumption and 20% verifier time speedup. 11919 */ 11920 if (env->jmps_processed - env->prev_jmps_processed >= 2 && 11921 env->insn_processed - env->prev_insn_processed >= 8) 11922 add_new_state = true; 11923 11924 pprev = explored_state(env, insn_idx); 11925 sl = *pprev; 11926 11927 clean_live_states(env, insn_idx, cur); 11928 11929 while (sl) { 11930 states_cnt++; 11931 if (sl->state.insn_idx != insn_idx) 11932 goto next; 11933 11934 if (sl->state.branches) { 11935 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; 11936 11937 if (frame->in_async_callback_fn && 11938 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { 11939 /* Different async_entry_cnt means that the verifier is 11940 * processing another entry into async callback. 11941 * Seeing the same state is not an indication of infinite 11942 * loop or infinite recursion. 11943 * But finding the same state doesn't mean that it's safe 11944 * to stop processing the current state. The previous state 11945 * hasn't yet reached bpf_exit, since state.branches > 0. 11946 * Checking in_async_callback_fn alone is not enough either. 11947 * Since the verifier still needs to catch infinite loops 11948 * inside async callbacks. 11949 */ 11950 } else if (states_maybe_looping(&sl->state, cur) && 11951 states_equal(env, &sl->state, cur)) { 11952 verbose_linfo(env, insn_idx, "; "); 11953 verbose(env, "infinite loop detected at insn %d\n", insn_idx); 11954 return -EINVAL; 11955 } 11956 /* if the verifier is processing a loop, avoid adding new state 11957 * too often, since different loop iterations have distinct 11958 * states and may not help future pruning. 11959 * This threshold shouldn't be too low to make sure that 11960 * a loop with large bound will be rejected quickly. 11961 * The most abusive loop will be: 11962 * r1 += 1 11963 * if r1 < 1000000 goto pc-2 11964 * 1M insn_procssed limit / 100 == 10k peak states. 11965 * This threshold shouldn't be too high either, since states 11966 * at the end of the loop are likely to be useful in pruning. 11967 */ 11968 if (env->jmps_processed - env->prev_jmps_processed < 20 && 11969 env->insn_processed - env->prev_insn_processed < 100) 11970 add_new_state = false; 11971 goto miss; 11972 } 11973 if (states_equal(env, &sl->state, cur)) { 11974 sl->hit_cnt++; 11975 /* reached equivalent register/stack state, 11976 * prune the search. 11977 * Registers read by the continuation are read by us. 11978 * If we have any write marks in env->cur_state, they 11979 * will prevent corresponding reads in the continuation 11980 * from reaching our parent (an explored_state). 
Our 11981 * own state will get the read marks recorded, but 11982 * they'll be immediately forgotten as we're pruning 11983 * this state and will pop a new one. 11984 */ 11985 err = propagate_liveness(env, &sl->state, cur); 11986 11987 /* if previous state reached the exit with precision and 11988 * current state is equivalent to it (except precsion marks) 11989 * the precision needs to be propagated back in 11990 * the current state. 11991 */ 11992 err = err ? : push_jmp_history(env, cur); 11993 err = err ? : propagate_precision(env, &sl->state); 11994 if (err) 11995 return err; 11996 return 1; 11997 } 11998 miss: 11999 /* when new state is not going to be added do not increase miss count. 12000 * Otherwise several loop iterations will remove the state 12001 * recorded earlier. The goal of these heuristics is to have 12002 * states from some iterations of the loop (some in the beginning 12003 * and some at the end) to help pruning. 12004 */ 12005 if (add_new_state) 12006 sl->miss_cnt++; 12007 /* heuristic to determine whether this state is beneficial 12008 * to keep checking from state equivalence point of view. 12009 * Higher numbers increase max_states_per_insn and verification time, 12010 * but do not meaningfully decrease insn_processed. 12011 */ 12012 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { 12013 /* the state is unlikely to be useful. Remove it to 12014 * speed up verification 12015 */ 12016 *pprev = sl->next; 12017 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { 12018 u32 br = sl->state.branches; 12019 12020 WARN_ONCE(br, 12021 "BUG live_done but branches_to_explore %d\n", 12022 br); 12023 free_verifier_state(&sl->state, false); 12024 kfree(sl); 12025 env->peak_states--; 12026 } else { 12027 /* cannot free this state, since parentage chain may 12028 * walk it later. Add it for free_list instead to 12029 * be freed at the end of verification 12030 */ 12031 sl->next = env->free_list; 12032 env->free_list = sl; 12033 } 12034 sl = *pprev; 12035 continue; 12036 } 12037 next: 12038 pprev = &sl->next; 12039 sl = *pprev; 12040 } 12041 12042 if (env->max_states_per_insn < states_cnt) 12043 env->max_states_per_insn = states_cnt; 12044 12045 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 12046 return push_jmp_history(env, cur); 12047 12048 if (!add_new_state) 12049 return push_jmp_history(env, cur); 12050 12051 /* There were no equivalent states, remember the current one. 12052 * Technically the current state is not proven to be safe yet, 12053 * but it will either reach outer most bpf_exit (which means it's safe) 12054 * or it will be rejected. When there are no loops the verifier won't be 12055 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 12056 * again on the way to bpf_exit. 12057 * When looping the sl->state.branches will be > 0 and this state 12058 * will not be considered for equivalence until branches == 0. 
12059 */ 12060 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 12061 if (!new_sl) 12062 return -ENOMEM; 12063 env->total_states++; 12064 env->peak_states++; 12065 env->prev_jmps_processed = env->jmps_processed; 12066 env->prev_insn_processed = env->insn_processed; 12067 12068 /* add new state to the head of linked list */ 12069 new = &new_sl->state; 12070 err = copy_verifier_state(new, cur); 12071 if (err) { 12072 free_verifier_state(new, false); 12073 kfree(new_sl); 12074 return err; 12075 } 12076 new->insn_idx = insn_idx; 12077 WARN_ONCE(new->branches != 1, 12078 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 12079 12080 cur->parent = new; 12081 cur->first_insn_idx = insn_idx; 12082 clear_jmp_history(cur); 12083 new_sl->next = *explored_state(env, insn_idx); 12084 *explored_state(env, insn_idx) = new_sl; 12085 /* connect new state to parentage chain. Current frame needs all 12086 * registers connected. Only r6 - r9 of the callers are alive (pushed 12087 * to the stack implicitly by JITs) so in callers' frames connect just 12088 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 12089 * the state of the call instruction (with WRITTEN set), and r0 comes 12090 * from callee with its full parentage chain, anyway. 12091 */ 12092 /* clear write marks in current state: the writes we did are not writes 12093 * our child did, so they don't screen off its reads from us. 12094 * (There are no read marks in current state, because reads always mark 12095 * their parent and current state never has children yet. Only 12096 * explored_states can get read marks.) 12097 */ 12098 for (j = 0; j <= cur->curframe; j++) { 12099 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 12100 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 12101 for (i = 0; i < BPF_REG_FP; i++) 12102 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 12103 } 12104 12105 /* all stack frames are accessible from callee, clear them all */ 12106 for (j = 0; j <= cur->curframe; j++) { 12107 struct bpf_func_state *frame = cur->frame[j]; 12108 struct bpf_func_state *newframe = new->frame[j]; 12109 12110 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 12111 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 12112 frame->stack[i].spilled_ptr.parent = 12113 &newframe->stack[i].spilled_ptr; 12114 } 12115 } 12116 return 0; 12117 } 12118 12119 /* Return true if it's OK to have the same insn return a different type. */ 12120 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 12121 { 12122 switch (base_type(type)) { 12123 case PTR_TO_CTX: 12124 case PTR_TO_SOCKET: 12125 case PTR_TO_SOCK_COMMON: 12126 case PTR_TO_TCP_SOCK: 12127 case PTR_TO_XDP_SOCK: 12128 case PTR_TO_BTF_ID: 12129 return false; 12130 default: 12131 return true; 12132 } 12133 } 12134 12135 /* If an instruction was previously used with particular pointer types, then we 12136 * need to be careful to avoid cases such as the below, where it may be ok 12137 * for one branch accessing the pointer, but not ok for the other branch: 12138 * 12139 * R1 = sock_ptr 12140 * goto X; 12141 * ... 12142 * R1 = some_other_valid_ptr; 12143 * goto X; 12144 * ... 
12145 * R2 = *(u32 *)(R1 + 0); 12146 */ 12147 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 12148 { 12149 return src != prev && (!reg_type_mismatch_ok(src) || 12150 !reg_type_mismatch_ok(prev)); 12151 } 12152 12153 static int do_check(struct bpf_verifier_env *env) 12154 { 12155 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 12156 struct bpf_verifier_state *state = env->cur_state; 12157 struct bpf_insn *insns = env->prog->insnsi; 12158 struct bpf_reg_state *regs; 12159 int insn_cnt = env->prog->len; 12160 bool do_print_state = false; 12161 int prev_insn_idx = -1; 12162 12163 for (;;) { 12164 struct bpf_insn *insn; 12165 u8 class; 12166 int err; 12167 12168 env->prev_insn_idx = prev_insn_idx; 12169 if (env->insn_idx >= insn_cnt) { 12170 verbose(env, "invalid insn idx %d insn_cnt %d\n", 12171 env->insn_idx, insn_cnt); 12172 return -EFAULT; 12173 } 12174 12175 insn = &insns[env->insn_idx]; 12176 class = BPF_CLASS(insn->code); 12177 12178 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 12179 verbose(env, 12180 "BPF program is too large. Processed %d insn\n", 12181 env->insn_processed); 12182 return -E2BIG; 12183 } 12184 12185 err = is_state_visited(env, env->insn_idx); 12186 if (err < 0) 12187 return err; 12188 if (err == 1) { 12189 /* found equivalent state, can prune the search */ 12190 if (env->log.level & BPF_LOG_LEVEL) { 12191 if (do_print_state) 12192 verbose(env, "\nfrom %d to %d%s: safe\n", 12193 env->prev_insn_idx, env->insn_idx, 12194 env->cur_state->speculative ? 12195 " (speculative execution)" : ""); 12196 else 12197 verbose(env, "%d: safe\n", env->insn_idx); 12198 } 12199 goto process_bpf_exit; 12200 } 12201 12202 if (signal_pending(current)) 12203 return -EAGAIN; 12204 12205 if (need_resched()) 12206 cond_resched(); 12207 12208 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { 12209 verbose(env, "\nfrom %d to %d%s:", 12210 env->prev_insn_idx, env->insn_idx, 12211 env->cur_state->speculative ? 
12212 " (speculative execution)" : ""); 12213 print_verifier_state(env, state->frame[state->curframe], true); 12214 do_print_state = false; 12215 } 12216 12217 if (env->log.level & BPF_LOG_LEVEL) { 12218 const struct bpf_insn_cbs cbs = { 12219 .cb_call = disasm_kfunc_name, 12220 .cb_print = verbose, 12221 .private_data = env, 12222 }; 12223 12224 if (verifier_state_scratched(env)) 12225 print_insn_state(env, state->frame[state->curframe]); 12226 12227 verbose_linfo(env, env->insn_idx, "; "); 12228 env->prev_log_len = env->log.len_used; 12229 verbose(env, "%d: ", env->insn_idx); 12230 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 12231 env->prev_insn_print_len = env->log.len_used - env->prev_log_len; 12232 env->prev_log_len = env->log.len_used; 12233 } 12234 12235 if (bpf_prog_is_dev_bound(env->prog->aux)) { 12236 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 12237 env->prev_insn_idx); 12238 if (err) 12239 return err; 12240 } 12241 12242 regs = cur_regs(env); 12243 sanitize_mark_insn_seen(env); 12244 prev_insn_idx = env->insn_idx; 12245 12246 if (class == BPF_ALU || class == BPF_ALU64) { 12247 err = check_alu_op(env, insn); 12248 if (err) 12249 return err; 12250 12251 } else if (class == BPF_LDX) { 12252 enum bpf_reg_type *prev_src_type, src_reg_type; 12253 12254 /* check for reserved fields is already done */ 12255 12256 /* check src operand */ 12257 err = check_reg_arg(env, insn->src_reg, SRC_OP); 12258 if (err) 12259 return err; 12260 12261 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 12262 if (err) 12263 return err; 12264 12265 src_reg_type = regs[insn->src_reg].type; 12266 12267 /* check that memory (src_reg + off) is readable, 12268 * the state of dst_reg will be updated by this func 12269 */ 12270 err = check_mem_access(env, env->insn_idx, insn->src_reg, 12271 insn->off, BPF_SIZE(insn->code), 12272 BPF_READ, insn->dst_reg, false); 12273 if (err) 12274 return err; 12275 12276 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; 12277 12278 if (*prev_src_type == NOT_INIT) { 12279 /* saw a valid insn 12280 * dst_reg = *(u32 *)(src_reg + off) 12281 * save type to validate intersecting paths 12282 */ 12283 *prev_src_type = src_reg_type; 12284 12285 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { 12286 /* ABuser program is trying to use the same insn 12287 * dst_reg = *(u32*) (src_reg + off) 12288 * with different pointer types: 12289 * src_reg == ctx in one branch and 12290 * src_reg == stack|map in some other branch. 12291 * Reject it. 
12292 */ 12293 verbose(env, "same insn cannot be used with different pointers\n"); 12294 return -EINVAL; 12295 } 12296 12297 } else if (class == BPF_STX) { 12298 enum bpf_reg_type *prev_dst_type, dst_reg_type; 12299 12300 if (BPF_MODE(insn->code) == BPF_ATOMIC) { 12301 err = check_atomic(env, env->insn_idx, insn); 12302 if (err) 12303 return err; 12304 env->insn_idx++; 12305 continue; 12306 } 12307 12308 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { 12309 verbose(env, "BPF_STX uses reserved fields\n"); 12310 return -EINVAL; 12311 } 12312 12313 /* check src1 operand */ 12314 err = check_reg_arg(env, insn->src_reg, SRC_OP); 12315 if (err) 12316 return err; 12317 /* check src2 operand */ 12318 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 12319 if (err) 12320 return err; 12321 12322 dst_reg_type = regs[insn->dst_reg].type; 12323 12324 /* check that memory (dst_reg + off) is writeable */ 12325 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 12326 insn->off, BPF_SIZE(insn->code), 12327 BPF_WRITE, insn->src_reg, false); 12328 if (err) 12329 return err; 12330 12331 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 12332 12333 if (*prev_dst_type == NOT_INIT) { 12334 *prev_dst_type = dst_reg_type; 12335 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 12336 verbose(env, "same insn cannot be used with different pointers\n"); 12337 return -EINVAL; 12338 } 12339 12340 } else if (class == BPF_ST) { 12341 if (BPF_MODE(insn->code) != BPF_MEM || 12342 insn->src_reg != BPF_REG_0) { 12343 verbose(env, "BPF_ST uses reserved fields\n"); 12344 return -EINVAL; 12345 } 12346 /* check src operand */ 12347 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 12348 if (err) 12349 return err; 12350 12351 if (is_ctx_reg(env, insn->dst_reg)) { 12352 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 12353 insn->dst_reg, 12354 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); 12355 return -EACCES; 12356 } 12357 12358 /* check that memory (dst_reg + off) is writeable */ 12359 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 12360 insn->off, BPF_SIZE(insn->code), 12361 BPF_WRITE, -1, false); 12362 if (err) 12363 return err; 12364 12365 } else if (class == BPF_JMP || class == BPF_JMP32) { 12366 u8 opcode = BPF_OP(insn->code); 12367 12368 env->jmps_processed++; 12369 if (opcode == BPF_CALL) { 12370 if (BPF_SRC(insn->code) != BPF_K || 12371 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL 12372 && insn->off != 0) || 12373 (insn->src_reg != BPF_REG_0 && 12374 insn->src_reg != BPF_PSEUDO_CALL && 12375 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || 12376 insn->dst_reg != BPF_REG_0 || 12377 class == BPF_JMP32) { 12378 verbose(env, "BPF_CALL uses reserved fields\n"); 12379 return -EINVAL; 12380 } 12381 12382 if (env->cur_state->active_spin_lock && 12383 (insn->src_reg == BPF_PSEUDO_CALL || 12384 insn->imm != BPF_FUNC_spin_unlock)) { 12385 verbose(env, "function calls are not allowed while holding a lock\n"); 12386 return -EINVAL; 12387 } 12388 if (insn->src_reg == BPF_PSEUDO_CALL) 12389 err = check_func_call(env, insn, &env->insn_idx); 12390 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) 12391 err = check_kfunc_call(env, insn, &env->insn_idx); 12392 else 12393 err = check_helper_call(env, insn, &env->insn_idx); 12394 if (err) 12395 return err; 12396 } else if (opcode == BPF_JA) { 12397 if (BPF_SRC(insn->code) != BPF_K || 12398 insn->imm != 0 || 12399 insn->src_reg != BPF_REG_0 || 12400 insn->dst_reg != BPF_REG_0 || 12401 class == BPF_JMP32) { 12402 verbose(env, "BPF_JA 
uses reserved fields\n"); 12403 return -EINVAL; 12404 } 12405 12406 env->insn_idx += insn->off + 1; 12407 continue; 12408 12409 } else if (opcode == BPF_EXIT) { 12410 if (BPF_SRC(insn->code) != BPF_K || 12411 insn->imm != 0 || 12412 insn->src_reg != BPF_REG_0 || 12413 insn->dst_reg != BPF_REG_0 || 12414 class == BPF_JMP32) { 12415 verbose(env, "BPF_EXIT uses reserved fields\n"); 12416 return -EINVAL; 12417 } 12418 12419 if (env->cur_state->active_spin_lock) { 12420 verbose(env, "bpf_spin_unlock is missing\n"); 12421 return -EINVAL; 12422 } 12423 12424 /* We must do check_reference_leak here before 12425 * prepare_func_exit to handle the case when 12426 * state->curframe > 0, it may be a callback 12427 * function, for which reference_state must 12428 * match caller reference state when it exits. 12429 */ 12430 err = check_reference_leak(env); 12431 if (err) 12432 return err; 12433 12434 if (state->curframe) { 12435 /* exit from nested function */ 12436 err = prepare_func_exit(env, &env->insn_idx); 12437 if (err) 12438 return err; 12439 do_print_state = true; 12440 continue; 12441 } 12442 12443 err = check_return_code(env); 12444 if (err) 12445 return err; 12446 process_bpf_exit: 12447 mark_verifier_state_scratched(env); 12448 update_branch_counts(env, env->cur_state); 12449 err = pop_stack(env, &prev_insn_idx, 12450 &env->insn_idx, pop_log); 12451 if (err < 0) { 12452 if (err != -ENOENT) 12453 return err; 12454 break; 12455 } else { 12456 do_print_state = true; 12457 continue; 12458 } 12459 } else { 12460 err = check_cond_jmp_op(env, insn, &env->insn_idx); 12461 if (err) 12462 return err; 12463 } 12464 } else if (class == BPF_LD) { 12465 u8 mode = BPF_MODE(insn->code); 12466 12467 if (mode == BPF_ABS || mode == BPF_IND) { 12468 err = check_ld_abs(env, insn); 12469 if (err) 12470 return err; 12471 12472 } else if (mode == BPF_IMM) { 12473 err = check_ld_imm(env, insn); 12474 if (err) 12475 return err; 12476 12477 env->insn_idx++; 12478 sanitize_mark_insn_seen(env); 12479 } else { 12480 verbose(env, "invalid BPF_LD mode\n"); 12481 return -EINVAL; 12482 } 12483 } else { 12484 verbose(env, "unknown insn class %d\n", class); 12485 return -EINVAL; 12486 } 12487 12488 env->insn_idx++; 12489 } 12490 12491 return 0; 12492 } 12493 12494 static int find_btf_percpu_datasec(struct btf *btf) 12495 { 12496 const struct btf_type *t; 12497 const char *tname; 12498 int i, n; 12499 12500 /* 12501 * Both vmlinux and module each have their own ".data..percpu" 12502 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF 12503 * types to look at only module's own BTF types. 
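 * E.g. for split (module) BTF the type ids [1, btf_nr_types(btf_vmlinux))
 * are the shared vmlinux base types, so the scan below starts right after
 * them; for vmlinux BTF itself it starts at id 1 (id 0 is 'void').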
12504 */ 12505 n = btf_nr_types(btf); 12506 if (btf_is_module(btf)) 12507 i = btf_nr_types(btf_vmlinux); 12508 else 12509 i = 1; 12510 12511 for(; i < n; i++) { 12512 t = btf_type_by_id(btf, i); 12513 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) 12514 continue; 12515 12516 tname = btf_name_by_offset(btf, t->name_off); 12517 if (!strcmp(tname, ".data..percpu")) 12518 return i; 12519 } 12520 12521 return -ENOENT; 12522 } 12523 12524 /* replace pseudo btf_id with kernel symbol address */ 12525 static int check_pseudo_btf_id(struct bpf_verifier_env *env, 12526 struct bpf_insn *insn, 12527 struct bpf_insn_aux_data *aux) 12528 { 12529 const struct btf_var_secinfo *vsi; 12530 const struct btf_type *datasec; 12531 struct btf_mod_pair *btf_mod; 12532 const struct btf_type *t; 12533 const char *sym_name; 12534 bool percpu = false; 12535 u32 type, id = insn->imm; 12536 struct btf *btf; 12537 s32 datasec_id; 12538 u64 addr; 12539 int i, btf_fd, err; 12540 12541 btf_fd = insn[1].imm; 12542 if (btf_fd) { 12543 btf = btf_get_by_fd(btf_fd); 12544 if (IS_ERR(btf)) { 12545 verbose(env, "invalid module BTF object FD specified.\n"); 12546 return -EINVAL; 12547 } 12548 } else { 12549 if (!btf_vmlinux) { 12550 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); 12551 return -EINVAL; 12552 } 12553 btf = btf_vmlinux; 12554 btf_get(btf); 12555 } 12556 12557 t = btf_type_by_id(btf, id); 12558 if (!t) { 12559 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); 12560 err = -ENOENT; 12561 goto err_put; 12562 } 12563 12564 if (!btf_type_is_var(t)) { 12565 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id); 12566 err = -EINVAL; 12567 goto err_put; 12568 } 12569 12570 sym_name = btf_name_by_offset(btf, t->name_off); 12571 addr = kallsyms_lookup_name(sym_name); 12572 if (!addr) { 12573 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", 12574 sym_name); 12575 err = -ENOENT; 12576 goto err_put; 12577 } 12578 12579 datasec_id = find_btf_percpu_datasec(btf); 12580 if (datasec_id > 0) { 12581 datasec = btf_type_by_id(btf, datasec_id); 12582 for_each_vsi(i, datasec, vsi) { 12583 if (vsi->type == id) { 12584 percpu = true; 12585 break; 12586 } 12587 } 12588 } 12589 12590 insn[0].imm = (u32)addr; 12591 insn[1].imm = addr >> 32; 12592 12593 type = t->type; 12594 t = btf_type_skip_modifiers(btf, type, NULL); 12595 if (percpu) { 12596 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; 12597 aux->btf_var.btf = btf; 12598 aux->btf_var.btf_id = type; 12599 } else if (!btf_type_is_struct(t)) { 12600 const struct btf_type *ret; 12601 const char *tname; 12602 u32 tsize; 12603 12604 /* resolve the type size of ksym. 
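 * A non-struct ksym (e.g. a plain int) is then exposed as read-only memory
 * of that size (PTR_TO_MEM | MEM_RDONLY); a struct ksym keeps its BTF id
 * and becomes PTR_TO_BTF_ID instead.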
*/ 12605 ret = btf_resolve_size(btf, t, &tsize); 12606 if (IS_ERR(ret)) { 12607 tname = btf_name_by_offset(btf, t->name_off); 12608 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", 12609 tname, PTR_ERR(ret)); 12610 err = -EINVAL; 12611 goto err_put; 12612 } 12613 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 12614 aux->btf_var.mem_size = tsize; 12615 } else { 12616 aux->btf_var.reg_type = PTR_TO_BTF_ID; 12617 aux->btf_var.btf = btf; 12618 aux->btf_var.btf_id = type; 12619 } 12620 12621 /* check whether we recorded this BTF (and maybe module) already */ 12622 for (i = 0; i < env->used_btf_cnt; i++) { 12623 if (env->used_btfs[i].btf == btf) { 12624 btf_put(btf); 12625 return 0; 12626 } 12627 } 12628 12629 if (env->used_btf_cnt >= MAX_USED_BTFS) { 12630 err = -E2BIG; 12631 goto err_put; 12632 } 12633 12634 btf_mod = &env->used_btfs[env->used_btf_cnt]; 12635 btf_mod->btf = btf; 12636 btf_mod->module = NULL; 12637 12638 /* if we reference variables from kernel module, bump its refcount */ 12639 if (btf_is_module(btf)) { 12640 btf_mod->module = btf_try_get_module(btf); 12641 if (!btf_mod->module) { 12642 err = -ENXIO; 12643 goto err_put; 12644 } 12645 } 12646 12647 env->used_btf_cnt++; 12648 12649 return 0; 12650 err_put: 12651 btf_put(btf); 12652 return err; 12653 } 12654 12655 static bool is_tracing_prog_type(enum bpf_prog_type type) 12656 { 12657 switch (type) { 12658 case BPF_PROG_TYPE_KPROBE: 12659 case BPF_PROG_TYPE_TRACEPOINT: 12660 case BPF_PROG_TYPE_PERF_EVENT: 12661 case BPF_PROG_TYPE_RAW_TRACEPOINT: 12662 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 12663 return true; 12664 default: 12665 return false; 12666 } 12667 } 12668 12669 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 12670 struct bpf_map *map, 12671 struct bpf_prog *prog) 12672 12673 { 12674 enum bpf_prog_type prog_type = resolve_prog_type(prog); 12675 12676 if (map_value_has_spin_lock(map)) { 12677 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 12678 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 12679 return -EINVAL; 12680 } 12681 12682 if (is_tracing_prog_type(prog_type)) { 12683 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 12684 return -EINVAL; 12685 } 12686 12687 if (prog->aux->sleepable) { 12688 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n"); 12689 return -EINVAL; 12690 } 12691 } 12692 12693 if (map_value_has_timer(map)) { 12694 if (is_tracing_prog_type(prog_type)) { 12695 verbose(env, "tracing progs cannot use bpf_timer yet\n"); 12696 return -EINVAL; 12697 } 12698 } 12699 12700 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 12701 !bpf_offload_prog_map_match(prog, map)) { 12702 verbose(env, "offload device mismatch between prog and map\n"); 12703 return -EINVAL; 12704 } 12705 12706 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 12707 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 12708 return -EINVAL; 12709 } 12710 12711 if (prog->aux->sleepable) 12712 switch (map->map_type) { 12713 case BPF_MAP_TYPE_HASH: 12714 case BPF_MAP_TYPE_LRU_HASH: 12715 case BPF_MAP_TYPE_ARRAY: 12716 case BPF_MAP_TYPE_PERCPU_HASH: 12717 case BPF_MAP_TYPE_PERCPU_ARRAY: 12718 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 12719 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 12720 case BPF_MAP_TYPE_HASH_OF_MAPS: 12721 case BPF_MAP_TYPE_RINGBUF: 12722 case BPF_MAP_TYPE_USER_RINGBUF: 12723 case BPF_MAP_TYPE_INODE_STORAGE: 12724 case BPF_MAP_TYPE_SK_STORAGE: 12725 case BPF_MAP_TYPE_TASK_STORAGE: 12726 break; 12727 default: 12728 
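			/* any other map type is not (yet) allowed for
			 * sleepable programs
			 */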
verbose(env, 12729 "Sleepable programs can only use array, hash, and ringbuf maps\n"); 12730 return -EINVAL; 12731 } 12732 12733 return 0; 12734 } 12735 12736 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 12737 { 12738 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 12739 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 12740 } 12741 12742 /* find and rewrite pseudo imm in ld_imm64 instructions: 12743 * 12744 * 1. if it accesses map FD, replace it with actual map pointer. 12745 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 12746 * 12747 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 12748 */ 12749 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 12750 { 12751 struct bpf_insn *insn = env->prog->insnsi; 12752 int insn_cnt = env->prog->len; 12753 int i, j, err; 12754 12755 err = bpf_prog_calc_tag(env->prog); 12756 if (err) 12757 return err; 12758 12759 for (i = 0; i < insn_cnt; i++, insn++) { 12760 if (BPF_CLASS(insn->code) == BPF_LDX && 12761 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 12762 verbose(env, "BPF_LDX uses reserved fields\n"); 12763 return -EINVAL; 12764 } 12765 12766 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 12767 struct bpf_insn_aux_data *aux; 12768 struct bpf_map *map; 12769 struct fd f; 12770 u64 addr; 12771 u32 fd; 12772 12773 if (i == insn_cnt - 1 || insn[1].code != 0 || 12774 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 12775 insn[1].off != 0) { 12776 verbose(env, "invalid bpf_ld_imm64 insn\n"); 12777 return -EINVAL; 12778 } 12779 12780 if (insn[0].src_reg == 0) 12781 /* valid generic load 64-bit imm */ 12782 goto next_insn; 12783 12784 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 12785 aux = &env->insn_aux_data[i]; 12786 err = check_pseudo_btf_id(env, insn, aux); 12787 if (err) 12788 return err; 12789 goto next_insn; 12790 } 12791 12792 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { 12793 aux = &env->insn_aux_data[i]; 12794 aux->ptr_type = PTR_TO_FUNC; 12795 goto next_insn; 12796 } 12797 12798 /* In final convert_pseudo_ld_imm64() step, this is 12799 * converted into regular 64-bit imm load insn. 
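 *
 * Rough sketch of the pseudo flavours accepted below, with src_reg
 * selecting the kind (the _VALUE forms additionally carry an offset
 * into the map value in insn[1].imm):
 *   BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
 *   BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_IDX, idx_into_fd_array)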
12800 */ 12801 switch (insn[0].src_reg) { 12802 case BPF_PSEUDO_MAP_VALUE: 12803 case BPF_PSEUDO_MAP_IDX_VALUE: 12804 break; 12805 case BPF_PSEUDO_MAP_FD: 12806 case BPF_PSEUDO_MAP_IDX: 12807 if (insn[1].imm == 0) 12808 break; 12809 fallthrough; 12810 default: 12811 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); 12812 return -EINVAL; 12813 } 12814 12815 switch (insn[0].src_reg) { 12816 case BPF_PSEUDO_MAP_IDX_VALUE: 12817 case BPF_PSEUDO_MAP_IDX: 12818 if (bpfptr_is_null(env->fd_array)) { 12819 verbose(env, "fd_idx without fd_array is invalid\n"); 12820 return -EPROTO; 12821 } 12822 if (copy_from_bpfptr_offset(&fd, env->fd_array, 12823 insn[0].imm * sizeof(fd), 12824 sizeof(fd))) 12825 return -EFAULT; 12826 break; 12827 default: 12828 fd = insn[0].imm; 12829 break; 12830 } 12831 12832 f = fdget(fd); 12833 map = __bpf_map_get(f); 12834 if (IS_ERR(map)) { 12835 verbose(env, "fd %d is not pointing to valid bpf_map\n", 12836 insn[0].imm); 12837 return PTR_ERR(map); 12838 } 12839 12840 err = check_map_prog_compatibility(env, map, env->prog); 12841 if (err) { 12842 fdput(f); 12843 return err; 12844 } 12845 12846 aux = &env->insn_aux_data[i]; 12847 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || 12848 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { 12849 addr = (unsigned long)map; 12850 } else { 12851 u32 off = insn[1].imm; 12852 12853 if (off >= BPF_MAX_VAR_OFF) { 12854 verbose(env, "direct value offset of %u is not allowed\n", off); 12855 fdput(f); 12856 return -EINVAL; 12857 } 12858 12859 if (!map->ops->map_direct_value_addr) { 12860 verbose(env, "no direct value access support for this map type\n"); 12861 fdput(f); 12862 return -EINVAL; 12863 } 12864 12865 err = map->ops->map_direct_value_addr(map, &addr, off); 12866 if (err) { 12867 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 12868 map->value_size, off); 12869 fdput(f); 12870 return err; 12871 } 12872 12873 aux->map_off = off; 12874 addr += off; 12875 } 12876 12877 insn[0].imm = (u32)addr; 12878 insn[1].imm = addr >> 32; 12879 12880 /* check whether we recorded this map already */ 12881 for (j = 0; j < env->used_map_cnt; j++) { 12882 if (env->used_maps[j] == map) { 12883 aux->map_index = j; 12884 fdput(f); 12885 goto next_insn; 12886 } 12887 } 12888 12889 if (env->used_map_cnt >= MAX_USED_MAPS) { 12890 fdput(f); 12891 return -E2BIG; 12892 } 12893 12894 /* hold the map. If the program is rejected by verifier, 12895 * the map will be released by release_maps() or it 12896 * will be used by the valid program until it's unloaded 12897 * and all maps are released in free_used_maps() 12898 */ 12899 bpf_map_inc(map); 12900 12901 aux->map_index = env->used_map_cnt; 12902 env->used_maps[env->used_map_cnt++] = map; 12903 12904 if (bpf_map_is_cgroup_storage(map) && 12905 bpf_cgroup_storage_assign(env->prog->aux, map)) { 12906 verbose(env, "only one cgroup storage of each type is allowed\n"); 12907 fdput(f); 12908 return -EBUSY; 12909 } 12910 12911 fdput(f); 12912 next_insn: 12913 insn++; 12914 i++; 12915 continue; 12916 } 12917 12918 /* Basic sanity check before we invest more work here. */ 12919 if (!bpf_opcode_in_insntable(insn->code)) { 12920 verbose(env, "unknown opcode %02x\n", insn->code); 12921 return -EINVAL; 12922 } 12923 } 12924 12925 /* now all pseudo BPF_LD_IMM64 instructions load valid 12926 * 'struct bpf_map *' into a register instead of user map_fd. 12927 * These pointers will be used later by verifier to validate map access. 
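 *
 * Roughly, a pair of insns written by the loader as
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 * now carries the map's kernel address split across the two 32-bit imm
 * fields and is treated as CONST_PTR_TO_MAP from here on.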
12928 */ 12929 return 0; 12930 } 12931 12932 /* drop refcnt of maps used by the rejected program */ 12933 static void release_maps(struct bpf_verifier_env *env) 12934 { 12935 __bpf_free_used_maps(env->prog->aux, env->used_maps, 12936 env->used_map_cnt); 12937 } 12938 12939 /* drop refcnt of maps used by the rejected program */ 12940 static void release_btfs(struct bpf_verifier_env *env) 12941 { 12942 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, 12943 env->used_btf_cnt); 12944 } 12945 12946 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 12947 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 12948 { 12949 struct bpf_insn *insn = env->prog->insnsi; 12950 int insn_cnt = env->prog->len; 12951 int i; 12952 12953 for (i = 0; i < insn_cnt; i++, insn++) { 12954 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) 12955 continue; 12956 if (insn->src_reg == BPF_PSEUDO_FUNC) 12957 continue; 12958 insn->src_reg = 0; 12959 } 12960 } 12961 12962 /* single env->prog->insni[off] instruction was replaced with the range 12963 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 12964 * [0, off) and [off, end) to new locations, so the patched range stays zero 12965 */ 12966 static void adjust_insn_aux_data(struct bpf_verifier_env *env, 12967 struct bpf_insn_aux_data *new_data, 12968 struct bpf_prog *new_prog, u32 off, u32 cnt) 12969 { 12970 struct bpf_insn_aux_data *old_data = env->insn_aux_data; 12971 struct bpf_insn *insn = new_prog->insnsi; 12972 u32 old_seen = old_data[off].seen; 12973 u32 prog_len; 12974 int i; 12975 12976 /* aux info at OFF always needs adjustment, no matter fast path 12977 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 12978 * original insn at old prog. 12979 */ 12980 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 12981 12982 if (cnt == 1) 12983 return; 12984 prog_len = new_prog->len; 12985 12986 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 12987 memcpy(new_data + off + cnt - 1, old_data + off, 12988 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 12989 for (i = off; i < off + cnt - 1; i++) { 12990 /* Expand insni[off]'s seen count to the patched range. */ 12991 new_data[i].seen = old_seen; 12992 new_data[i].zext_dst = insn_has_def32(env, insn + i); 12993 } 12994 env->insn_aux_data = new_data; 12995 vfree(old_data); 12996 } 12997 12998 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 12999 { 13000 int i; 13001 13002 if (len == 1) 13003 return; 13004 /* NOTE: fake 'exit' subprog should be updated as well. 
*/ 13005 for (i = 0; i <= env->subprog_cnt; i++) { 13006 if (env->subprog_info[i].start <= off) 13007 continue; 13008 env->subprog_info[i].start += len - 1; 13009 } 13010 } 13011 13012 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 13013 { 13014 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 13015 int i, sz = prog->aux->size_poke_tab; 13016 struct bpf_jit_poke_descriptor *desc; 13017 13018 for (i = 0; i < sz; i++) { 13019 desc = &tab[i]; 13020 if (desc->insn_idx <= off) 13021 continue; 13022 desc->insn_idx += len - 1; 13023 } 13024 } 13025 13026 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 13027 const struct bpf_insn *patch, u32 len) 13028 { 13029 struct bpf_prog *new_prog; 13030 struct bpf_insn_aux_data *new_data = NULL; 13031 13032 if (len > 1) { 13033 new_data = vzalloc(array_size(env->prog->len + len - 1, 13034 sizeof(struct bpf_insn_aux_data))); 13035 if (!new_data) 13036 return NULL; 13037 } 13038 13039 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 13040 if (IS_ERR(new_prog)) { 13041 if (PTR_ERR(new_prog) == -ERANGE) 13042 verbose(env, 13043 "insn %d cannot be patched due to 16-bit range\n", 13044 env->insn_aux_data[off].orig_idx); 13045 vfree(new_data); 13046 return NULL; 13047 } 13048 adjust_insn_aux_data(env, new_data, new_prog, off, len); 13049 adjust_subprog_starts(env, off, len); 13050 adjust_poke_descs(new_prog, off, len); 13051 return new_prog; 13052 } 13053 13054 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 13055 u32 off, u32 cnt) 13056 { 13057 int i, j; 13058 13059 /* find first prog starting at or after off (first to remove) */ 13060 for (i = 0; i < env->subprog_cnt; i++) 13061 if (env->subprog_info[i].start >= off) 13062 break; 13063 /* find first prog starting at or after off + cnt (first to stay) */ 13064 for (j = i; j < env->subprog_cnt; j++) 13065 if (env->subprog_info[j].start >= off + cnt) 13066 break; 13067 /* if j doesn't start exactly at off + cnt, we are just removing 13068 * the front of previous prog 13069 */ 13070 if (env->subprog_info[j].start != off + cnt) 13071 j--; 13072 13073 if (j > i) { 13074 struct bpf_prog_aux *aux = env->prog->aux; 13075 int move; 13076 13077 /* move fake 'exit' subprog as well */ 13078 move = env->subprog_cnt + 1 - j; 13079 13080 memmove(env->subprog_info + i, 13081 env->subprog_info + j, 13082 sizeof(*env->subprog_info) * move); 13083 env->subprog_cnt -= j - i; 13084 13085 /* remove func_info */ 13086 if (aux->func_info) { 13087 move = aux->func_info_cnt - j; 13088 13089 memmove(aux->func_info + i, 13090 aux->func_info + j, 13091 sizeof(*aux->func_info) * move); 13092 aux->func_info_cnt -= j - i; 13093 /* func_info->insn_off is set after all code rewrites, 13094 * in adjust_btf_func() - no need to adjust 13095 */ 13096 } 13097 } else { 13098 /* convert i from "first prog to remove" to "first to adjust" */ 13099 if (env->subprog_info[i].start == off) 13100 i++; 13101 } 13102 13103 /* update fake 'exit' subprog as well */ 13104 for (; i <= env->subprog_cnt; i++) 13105 env->subprog_info[i].start -= cnt; 13106 13107 return 0; 13108 } 13109 13110 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 13111 u32 cnt) 13112 { 13113 struct bpf_prog *prog = env->prog; 13114 u32 i, l_off, l_cnt, nr_linfo; 13115 struct bpf_line_info *linfo; 13116 13117 nr_linfo = prog->aux->nr_linfo; 13118 if (!nr_linfo) 13119 return 0; 13120 13121 linfo = prog->aux->linfo; 13122 13123 /* find first line info to 
remove, count lines to be removed */ 13124 for (i = 0; i < nr_linfo; i++) 13125 if (linfo[i].insn_off >= off) 13126 break; 13127 13128 l_off = i; 13129 l_cnt = 0; 13130 for (; i < nr_linfo; i++) 13131 if (linfo[i].insn_off < off + cnt) 13132 l_cnt++; 13133 else 13134 break; 13135 13136 /* First live insn doesn't match first live linfo, it needs to "inherit" 13137 * last removed linfo. prog is already modified, so prog->len == off 13138 * means no live instructions after (tail of the program was removed). 13139 */ 13140 if (prog->len != off && l_cnt && 13141 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 13142 l_cnt--; 13143 linfo[--i].insn_off = off + cnt; 13144 } 13145 13146 /* remove the line info which refer to the removed instructions */ 13147 if (l_cnt) { 13148 memmove(linfo + l_off, linfo + i, 13149 sizeof(*linfo) * (nr_linfo - i)); 13150 13151 prog->aux->nr_linfo -= l_cnt; 13152 nr_linfo = prog->aux->nr_linfo; 13153 } 13154 13155 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 13156 for (i = l_off; i < nr_linfo; i++) 13157 linfo[i].insn_off -= cnt; 13158 13159 /* fix up all subprogs (incl. 'exit') which start >= off */ 13160 for (i = 0; i <= env->subprog_cnt; i++) 13161 if (env->subprog_info[i].linfo_idx > l_off) { 13162 /* program may have started in the removed region but 13163 * may not be fully removed 13164 */ 13165 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 13166 env->subprog_info[i].linfo_idx -= l_cnt; 13167 else 13168 env->subprog_info[i].linfo_idx = l_off; 13169 } 13170 13171 return 0; 13172 } 13173 13174 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 13175 { 13176 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 13177 unsigned int orig_prog_len = env->prog->len; 13178 int err; 13179 13180 if (bpf_prog_is_dev_bound(env->prog->aux)) 13181 bpf_prog_offload_remove_insns(env, off, cnt); 13182 13183 err = bpf_remove_insns(env->prog, off, cnt); 13184 if (err) 13185 return err; 13186 13187 err = adjust_subprog_starts_after_remove(env, off, cnt); 13188 if (err) 13189 return err; 13190 13191 err = bpf_adj_linfo_after_remove(env, off, cnt); 13192 if (err) 13193 return err; 13194 13195 memmove(aux_data + off, aux_data + off + cnt, 13196 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 13197 13198 return 0; 13199 } 13200 13201 /* The verifier does more data flow analysis than llvm and will not 13202 * explore branches that are dead at run time. Malicious programs can 13203 * have dead code too. Therefore replace all dead at-run-time code 13204 * with 'ja -1'. 13205 * 13206 * Just nops are not optimal, e.g. if they would sit at the end of the 13207 * program and through another bug we would manage to jump there, then 13208 * we'd execute beyond program memory otherwise. Returning exception 13209 * code also wouldn't work since we can have subprogs where the dead 13210 * code could be located. 
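 *
 * The trap used below is simply
 *   BPF_JMP_IMM(BPF_JA, 0, 0, -1)
 * i.e. a jump-to-self, so even a stray jump into dead code can neither
 * run off the end of the program nor fall into another subprog.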
13211 */ 13212 static void sanitize_dead_code(struct bpf_verifier_env *env) 13213 { 13214 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 13215 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 13216 struct bpf_insn *insn = env->prog->insnsi; 13217 const int insn_cnt = env->prog->len; 13218 int i; 13219 13220 for (i = 0; i < insn_cnt; i++) { 13221 if (aux_data[i].seen) 13222 continue; 13223 memcpy(insn + i, &trap, sizeof(trap)); 13224 aux_data[i].zext_dst = false; 13225 } 13226 } 13227 13228 static bool insn_is_cond_jump(u8 code) 13229 { 13230 u8 op; 13231 13232 if (BPF_CLASS(code) == BPF_JMP32) 13233 return true; 13234 13235 if (BPF_CLASS(code) != BPF_JMP) 13236 return false; 13237 13238 op = BPF_OP(code); 13239 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 13240 } 13241 13242 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 13243 { 13244 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 13245 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 13246 struct bpf_insn *insn = env->prog->insnsi; 13247 const int insn_cnt = env->prog->len; 13248 int i; 13249 13250 for (i = 0; i < insn_cnt; i++, insn++) { 13251 if (!insn_is_cond_jump(insn->code)) 13252 continue; 13253 13254 if (!aux_data[i + 1].seen) 13255 ja.off = insn->off; 13256 else if (!aux_data[i + 1 + insn->off].seen) 13257 ja.off = 0; 13258 else 13259 continue; 13260 13261 if (bpf_prog_is_dev_bound(env->prog->aux)) 13262 bpf_prog_offload_replace_insn(env, i, &ja); 13263 13264 memcpy(insn, &ja, sizeof(ja)); 13265 } 13266 } 13267 13268 static int opt_remove_dead_code(struct bpf_verifier_env *env) 13269 { 13270 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 13271 int insn_cnt = env->prog->len; 13272 int i, err; 13273 13274 for (i = 0; i < insn_cnt; i++) { 13275 int j; 13276 13277 j = 0; 13278 while (i + j < insn_cnt && !aux_data[i + j].seen) 13279 j++; 13280 if (!j) 13281 continue; 13282 13283 err = verifier_remove_insns(env, i, j); 13284 if (err) 13285 return err; 13286 insn_cnt = env->prog->len; 13287 } 13288 13289 return 0; 13290 } 13291 13292 static int opt_remove_nops(struct bpf_verifier_env *env) 13293 { 13294 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 13295 struct bpf_insn *insn = env->prog->insnsi; 13296 int insn_cnt = env->prog->len; 13297 int i, err; 13298 13299 for (i = 0; i < insn_cnt; i++) { 13300 if (memcmp(&insn[i], &ja, sizeof(ja))) 13301 continue; 13302 13303 err = verifier_remove_insns(env, i, 1); 13304 if (err) 13305 return err; 13306 insn_cnt--; 13307 i--; 13308 } 13309 13310 return 0; 13311 } 13312 13313 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 13314 const union bpf_attr *attr) 13315 { 13316 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 13317 struct bpf_insn_aux_data *aux = env->insn_aux_data; 13318 int i, patch_len, delta = 0, len = env->prog->len; 13319 struct bpf_insn *insns = env->prog->insnsi; 13320 struct bpf_prog *new_prog; 13321 bool rnd_hi32; 13322 13323 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 13324 zext_patch[1] = BPF_ZEXT_REG(0); 13325 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 13326 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 13327 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 13328 for (i = 0; i < len; i++) { 13329 int adj_idx = i + delta; 13330 struct bpf_insn insn; 13331 int load_reg; 13332 13333 insn = insns[adj_idx]; 13334 load_reg = insn_def_regno(&insn); 13335 if (!aux[adj_idx].zext_dst) { 13336 u8 code, class; 13337 u32 
imm_rnd; 13338 13339 if (!rnd_hi32) 13340 continue; 13341 13342 code = insn.code; 13343 class = BPF_CLASS(code); 13344 if (load_reg == -1) 13345 continue; 13346 13347 /* NOTE: arg "reg" (the fourth one) is only used for 13348 * BPF_STX + SRC_OP, so it is safe to pass NULL 13349 * here. 13350 */ 13351 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { 13352 if (class == BPF_LD && 13353 BPF_MODE(code) == BPF_IMM) 13354 i++; 13355 continue; 13356 } 13357 13358 /* ctx load could be transformed into wider load. */ 13359 if (class == BPF_LDX && 13360 aux[adj_idx].ptr_type == PTR_TO_CTX) 13361 continue; 13362 13363 imm_rnd = get_random_u32(); 13364 rnd_hi32_patch[0] = insn; 13365 rnd_hi32_patch[1].imm = imm_rnd; 13366 rnd_hi32_patch[3].dst_reg = load_reg; 13367 patch = rnd_hi32_patch; 13368 patch_len = 4; 13369 goto apply_patch_buffer; 13370 } 13371 13372 /* Add in an zero-extend instruction if a) the JIT has requested 13373 * it or b) it's a CMPXCHG. 13374 * 13375 * The latter is because: BPF_CMPXCHG always loads a value into 13376 * R0, therefore always zero-extends. However some archs' 13377 * equivalent instruction only does this load when the 13378 * comparison is successful. This detail of CMPXCHG is 13379 * orthogonal to the general zero-extension behaviour of the 13380 * CPU, so it's treated independently of bpf_jit_needs_zext. 13381 */ 13382 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) 13383 continue; 13384 13385 if (WARN_ON(load_reg == -1)) { 13386 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); 13387 return -EFAULT; 13388 } 13389 13390 zext_patch[0] = insn; 13391 zext_patch[1].dst_reg = load_reg; 13392 zext_patch[1].src_reg = load_reg; 13393 patch = zext_patch; 13394 patch_len = 2; 13395 apply_patch_buffer: 13396 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 13397 if (!new_prog) 13398 return -ENOMEM; 13399 env->prog = new_prog; 13400 insns = new_prog->insnsi; 13401 aux = env->insn_aux_data; 13402 delta += patch_len - 1; 13403 } 13404 13405 return 0; 13406 } 13407 13408 /* convert load instructions that access fields of a context type into a 13409 * sequence of instructions that access fields of the underlying structure: 13410 * struct __sk_buff -> struct sk_buff 13411 * struct bpf_sock_ops -> struct sock 13412 */ 13413 static int convert_ctx_accesses(struct bpf_verifier_env *env) 13414 { 13415 const struct bpf_verifier_ops *ops = env->ops; 13416 int i, cnt, size, ctx_field_size, delta = 0; 13417 const int insn_cnt = env->prog->len; 13418 struct bpf_insn insn_buf[16], *insn; 13419 u32 target_size, size_default, off; 13420 struct bpf_prog *new_prog; 13421 enum bpf_access_type type; 13422 bool is_narrower_load; 13423 13424 if (ops->gen_prologue || env->seen_direct_write) { 13425 if (!ops->gen_prologue) { 13426 verbose(env, "bpf verifier is misconfigured\n"); 13427 return -EINVAL; 13428 } 13429 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 13430 env->prog); 13431 if (cnt >= ARRAY_SIZE(insn_buf)) { 13432 verbose(env, "bpf verifier is misconfigured\n"); 13433 return -EINVAL; 13434 } else if (cnt) { 13435 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 13436 if (!new_prog) 13437 return -ENOMEM; 13438 13439 env->prog = new_prog; 13440 delta += cnt - 1; 13441 } 13442 } 13443 13444 if (bpf_prog_is_dev_bound(env->prog->aux)) 13445 return 0; 13446 13447 insn = env->prog->insnsi + delta; 13448 13449 for (i = 0; i < insn_cnt; i++, insn++) { 13450 bpf_convert_ctx_access_t convert_ctx_access; 13451 bool ctx_access; 13452 13453 if 
(insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 13454 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 13455 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 13456 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { 13457 type = BPF_READ; 13458 ctx_access = true; 13459 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 13460 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 13461 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 13462 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || 13463 insn->code == (BPF_ST | BPF_MEM | BPF_B) || 13464 insn->code == (BPF_ST | BPF_MEM | BPF_H) || 13465 insn->code == (BPF_ST | BPF_MEM | BPF_W) || 13466 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { 13467 type = BPF_WRITE; 13468 ctx_access = BPF_CLASS(insn->code) == BPF_STX; 13469 } else { 13470 continue; 13471 } 13472 13473 if (type == BPF_WRITE && 13474 env->insn_aux_data[i + delta].sanitize_stack_spill) { 13475 struct bpf_insn patch[] = { 13476 *insn, 13477 BPF_ST_NOSPEC(), 13478 }; 13479 13480 cnt = ARRAY_SIZE(patch); 13481 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 13482 if (!new_prog) 13483 return -ENOMEM; 13484 13485 delta += cnt - 1; 13486 env->prog = new_prog; 13487 insn = new_prog->insnsi + i + delta; 13488 continue; 13489 } 13490 13491 if (!ctx_access) 13492 continue; 13493 13494 switch ((int)env->insn_aux_data[i + delta].ptr_type) { 13495 case PTR_TO_CTX: 13496 if (!ops->convert_ctx_access) 13497 continue; 13498 convert_ctx_access = ops->convert_ctx_access; 13499 break; 13500 case PTR_TO_SOCKET: 13501 case PTR_TO_SOCK_COMMON: 13502 convert_ctx_access = bpf_sock_convert_ctx_access; 13503 break; 13504 case PTR_TO_TCP_SOCK: 13505 convert_ctx_access = bpf_tcp_sock_convert_ctx_access; 13506 break; 13507 case PTR_TO_XDP_SOCK: 13508 convert_ctx_access = bpf_xdp_sock_convert_ctx_access; 13509 break; 13510 case PTR_TO_BTF_ID: 13511 case PTR_TO_BTF_ID | PTR_UNTRUSTED: 13512 if (type == BPF_READ) { 13513 insn->code = BPF_LDX | BPF_PROBE_MEM | 13514 BPF_SIZE((insn)->code); 13515 env->prog->aux->num_exentries++; 13516 } 13517 continue; 13518 default: 13519 continue; 13520 } 13521 13522 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 13523 size = BPF_LDST_BYTES(insn); 13524 13525 /* If the read access is a narrower load of the field, 13526 * convert to a 4/8-byte load, to minimum program type specific 13527 * convert_ctx_access changes. If conversion is successful, 13528 * we will apply proper mask to the result. 
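 *
 * Rough example: a 1-byte read of a 4-byte context field is widened to
 * a 4-byte load of the aligned field and then narrowed back with
 * something like
 *   BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff)
 * preceded by a right shift when the requested bytes do not sit in the
 * low part of the loaded word.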
13529 */ 13530 is_narrower_load = size < ctx_field_size; 13531 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 13532 off = insn->off; 13533 if (is_narrower_load) { 13534 u8 size_code; 13535 13536 if (type == BPF_WRITE) { 13537 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 13538 return -EINVAL; 13539 } 13540 13541 size_code = BPF_H; 13542 if (ctx_field_size == 4) 13543 size_code = BPF_W; 13544 else if (ctx_field_size == 8) 13545 size_code = BPF_DW; 13546 13547 insn->off = off & ~(size_default - 1); 13548 insn->code = BPF_LDX | BPF_MEM | size_code; 13549 } 13550 13551 target_size = 0; 13552 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 13553 &target_size); 13554 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 13555 (ctx_field_size && !target_size)) { 13556 verbose(env, "bpf verifier is misconfigured\n"); 13557 return -EINVAL; 13558 } 13559 13560 if (is_narrower_load && size < target_size) { 13561 u8 shift = bpf_ctx_narrow_access_offset( 13562 off, size, size_default) * 8; 13563 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { 13564 verbose(env, "bpf verifier narrow ctx load misconfigured\n"); 13565 return -EINVAL; 13566 } 13567 if (ctx_field_size <= 4) { 13568 if (shift) 13569 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 13570 insn->dst_reg, 13571 shift); 13572 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 13573 (1 << size * 8) - 1); 13574 } else { 13575 if (shift) 13576 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 13577 insn->dst_reg, 13578 shift); 13579 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 13580 (1ULL << size * 8) - 1); 13581 } 13582 } 13583 13584 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 13585 if (!new_prog) 13586 return -ENOMEM; 13587 13588 delta += cnt - 1; 13589 13590 /* keep walking new program and skip insns we just inserted */ 13591 env->prog = new_prog; 13592 insn = new_prog->insnsi + i + delta; 13593 } 13594 13595 return 0; 13596 } 13597 13598 static int jit_subprogs(struct bpf_verifier_env *env) 13599 { 13600 struct bpf_prog *prog = env->prog, **func, *tmp; 13601 int i, j, subprog_start, subprog_end = 0, len, subprog; 13602 struct bpf_map *map_ptr; 13603 struct bpf_insn *insn; 13604 void *old_bpf_func; 13605 int err, num_exentries; 13606 13607 if (env->subprog_cnt <= 1) 13608 return 0; 13609 13610 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 13611 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) 13612 continue; 13613 13614 /* Upon error here we cannot fall back to interpreter but 13615 * need a hard reject of the program. Thus -EFAULT is 13616 * propagated in any case. 13617 */ 13618 subprog = find_subprog(env, i + insn->imm + 1); 13619 if (subprog < 0) { 13620 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 13621 i + insn->imm + 1); 13622 return -EFAULT; 13623 } 13624 /* temporarily remember subprog id inside insn instead of 13625 * aux_data, since next loop will split up all insns into funcs 13626 */ 13627 insn->off = subprog; 13628 /* remember original imm in case JIT fails and fallback 13629 * to interpreter will be needed 13630 */ 13631 env->insn_aux_data[i].call_imm = insn->imm; 13632 /* point imm to __bpf_call_base+1 from JITs point of view */ 13633 insn->imm = 1; 13634 if (bpf_pseudo_func(insn)) 13635 /* jit (e.g. x86_64) may emit fewer instructions 13636 * if it learns a u32 imm is the same as a u64 imm. 13637 * Force a non zero here. 
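 * (insn[1].imm holds the upper 32 bits of the ld_imm64, so the
 * assignment below keeps the immediate a genuine 64-bit value.)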
13638 */ 13639 insn[1].imm = 1; 13640 } 13641 13642 err = bpf_prog_alloc_jited_linfo(prog); 13643 if (err) 13644 goto out_undo_insn; 13645 13646 err = -ENOMEM; 13647 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 13648 if (!func) 13649 goto out_undo_insn; 13650 13651 for (i = 0; i < env->subprog_cnt; i++) { 13652 subprog_start = subprog_end; 13653 subprog_end = env->subprog_info[i + 1].start; 13654 13655 len = subprog_end - subprog_start; 13656 /* bpf_prog_run() doesn't call subprogs directly, 13657 * hence main prog stats include the runtime of subprogs. 13658 * subprogs don't have IDs and not reachable via prog_get_next_id 13659 * func[i]->stats will never be accessed and stays NULL 13660 */ 13661 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 13662 if (!func[i]) 13663 goto out_free; 13664 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 13665 len * sizeof(struct bpf_insn)); 13666 func[i]->type = prog->type; 13667 func[i]->len = len; 13668 if (bpf_prog_calc_tag(func[i])) 13669 goto out_free; 13670 func[i]->is_func = 1; 13671 func[i]->aux->func_idx = i; 13672 /* Below members will be freed only at prog->aux */ 13673 func[i]->aux->btf = prog->aux->btf; 13674 func[i]->aux->func_info = prog->aux->func_info; 13675 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; 13676 func[i]->aux->poke_tab = prog->aux->poke_tab; 13677 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; 13678 13679 for (j = 0; j < prog->aux->size_poke_tab; j++) { 13680 struct bpf_jit_poke_descriptor *poke; 13681 13682 poke = &prog->aux->poke_tab[j]; 13683 if (poke->insn_idx < subprog_end && 13684 poke->insn_idx >= subprog_start) 13685 poke->aux = func[i]->aux; 13686 } 13687 13688 func[i]->aux->name[0] = 'F'; 13689 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; 13690 func[i]->jit_requested = 1; 13691 func[i]->blinding_requested = prog->blinding_requested; 13692 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; 13693 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; 13694 func[i]->aux->linfo = prog->aux->linfo; 13695 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 13696 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 13697 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 13698 num_exentries = 0; 13699 insn = func[i]->insnsi; 13700 for (j = 0; j < func[i]->len; j++, insn++) { 13701 if (BPF_CLASS(insn->code) == BPF_LDX && 13702 BPF_MODE(insn->code) == BPF_PROBE_MEM) 13703 num_exentries++; 13704 } 13705 func[i]->aux->num_exentries = num_exentries; 13706 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; 13707 func[i] = bpf_int_jit_compile(func[i]); 13708 if (!func[i]->jited) { 13709 err = -ENOTSUPP; 13710 goto out_free; 13711 } 13712 cond_resched(); 13713 } 13714 13715 /* at this point all bpf functions were successfully JITed 13716 * now populate all bpf_calls with correct addresses and 13717 * run last pass of JIT 13718 */ 13719 for (i = 0; i < env->subprog_cnt; i++) { 13720 insn = func[i]->insnsi; 13721 for (j = 0; j < func[i]->len; j++, insn++) { 13722 if (bpf_pseudo_func(insn)) { 13723 subprog = insn->off; 13724 insn[0].imm = (u32)(long)func[subprog]->bpf_func; 13725 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; 13726 continue; 13727 } 13728 if (!bpf_pseudo_call(insn)) 13729 continue; 13730 subprog = insn->off; 13731 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); 13732 } 13733 13734 /* we use the aux data to keep a list of the start addresses 13735 * of the JITed images for each function in the program 13736 
* 13737 * for some architectures, such as powerpc64, the imm field 13738 * might not be large enough to hold the offset of the start 13739 * address of the callee's JITed image from __bpf_call_base 13740 * 13741 * in such cases, we can lookup the start address of a callee 13742 * by using its subprog id, available from the off field of 13743 * the call instruction, as an index for this list 13744 */ 13745 func[i]->aux->func = func; 13746 func[i]->aux->func_cnt = env->subprog_cnt; 13747 } 13748 for (i = 0; i < env->subprog_cnt; i++) { 13749 old_bpf_func = func[i]->bpf_func; 13750 tmp = bpf_int_jit_compile(func[i]); 13751 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 13752 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 13753 err = -ENOTSUPP; 13754 goto out_free; 13755 } 13756 cond_resched(); 13757 } 13758 13759 /* finally lock prog and jit images for all functions and 13760 * populate kallsysm 13761 */ 13762 for (i = 0; i < env->subprog_cnt; i++) { 13763 bpf_prog_lock_ro(func[i]); 13764 bpf_prog_kallsyms_add(func[i]); 13765 } 13766 13767 /* Last step: make now unused interpreter insns from main 13768 * prog consistent for later dump requests, so they can 13769 * later look the same as if they were interpreted only. 13770 */ 13771 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 13772 if (bpf_pseudo_func(insn)) { 13773 insn[0].imm = env->insn_aux_data[i].call_imm; 13774 insn[1].imm = insn->off; 13775 insn->off = 0; 13776 continue; 13777 } 13778 if (!bpf_pseudo_call(insn)) 13779 continue; 13780 insn->off = env->insn_aux_data[i].call_imm; 13781 subprog = find_subprog(env, i + insn->off + 1); 13782 insn->imm = subprog; 13783 } 13784 13785 prog->jited = 1; 13786 prog->bpf_func = func[0]->bpf_func; 13787 prog->jited_len = func[0]->jited_len; 13788 prog->aux->func = func; 13789 prog->aux->func_cnt = env->subprog_cnt; 13790 bpf_prog_jit_attempt_done(prog); 13791 return 0; 13792 out_free: 13793 /* We failed JIT'ing, so at this point we need to unregister poke 13794 * descriptors from subprogs, so that kernel is not attempting to 13795 * patch it anymore as we're freeing the subprog JIT memory. 13796 */ 13797 for (i = 0; i < prog->aux->size_poke_tab; i++) { 13798 map_ptr = prog->aux->poke_tab[i].tail_call.map; 13799 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); 13800 } 13801 /* At this point we're guaranteed that poke descriptors are not 13802 * live anymore. We can just unlink its descriptor table as it's 13803 * released with the main prog. 
13804 */ 13805 for (i = 0; i < env->subprog_cnt; i++) { 13806 if (!func[i]) 13807 continue; 13808 func[i]->aux->poke_tab = NULL; 13809 bpf_jit_free(func[i]); 13810 } 13811 kfree(func); 13812 out_undo_insn: 13813 /* cleanup main prog to be interpreted */ 13814 prog->jit_requested = 0; 13815 prog->blinding_requested = 0; 13816 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 13817 if (!bpf_pseudo_call(insn)) 13818 continue; 13819 insn->off = 0; 13820 insn->imm = env->insn_aux_data[i].call_imm; 13821 } 13822 bpf_prog_jit_attempt_done(prog); 13823 return err; 13824 } 13825 13826 static int fixup_call_args(struct bpf_verifier_env *env) 13827 { 13828 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 13829 struct bpf_prog *prog = env->prog; 13830 struct bpf_insn *insn = prog->insnsi; 13831 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); 13832 int i, depth; 13833 #endif 13834 int err = 0; 13835 13836 if (env->prog->jit_requested && 13837 !bpf_prog_is_dev_bound(env->prog->aux)) { 13838 err = jit_subprogs(env); 13839 if (err == 0) 13840 return 0; 13841 if (err == -EFAULT) 13842 return err; 13843 } 13844 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 13845 if (has_kfunc_call) { 13846 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); 13847 return -EINVAL; 13848 } 13849 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { 13850 /* When JIT fails the progs with bpf2bpf calls and tail_calls 13851 * have to be rejected, since interpreter doesn't support them yet. 13852 */ 13853 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 13854 return -EINVAL; 13855 } 13856 for (i = 0; i < prog->len; i++, insn++) { 13857 if (bpf_pseudo_func(insn)) { 13858 /* When JIT fails the progs with callback calls 13859 * have to be rejected, since interpreter doesn't support them yet. 13860 */ 13861 verbose(env, "callbacks are not allowed in non-JITed programs\n"); 13862 return -EINVAL; 13863 } 13864 13865 if (!bpf_pseudo_call(insn)) 13866 continue; 13867 depth = get_callee_stack_depth(env, insn, i); 13868 if (depth < 0) 13869 return depth; 13870 bpf_patch_call_args(insn, depth); 13871 } 13872 err = 0; 13873 #endif 13874 return err; 13875 } 13876 13877 static int fixup_kfunc_call(struct bpf_verifier_env *env, 13878 struct bpf_insn *insn) 13879 { 13880 const struct bpf_kfunc_desc *desc; 13881 13882 if (!insn->imm) { 13883 verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); 13884 return -EINVAL; 13885 } 13886 13887 /* insn->imm has the btf func_id. Replace it with 13888 * an address (relative to __bpf_base_call). 13889 */ 13890 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); 13891 if (!desc) { 13892 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", 13893 insn->imm); 13894 return -EFAULT; 13895 } 13896 13897 insn->imm = desc->imm; 13898 13899 return 0; 13900 } 13901 13902 /* Do various post-verification rewrites in a single program pass. 13903 * These rewrites simplify JIT and interpreter implementations. 
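 *
 * Examples of what is rewritten below: division/modulo gets guarded so
 * a zero divisor cannot fault, LD_ABS/LD_IND is expanded per program
 * type, pointer ALU is masked to mitigate speculation, bpf_tail_call
 * gets its own opcode, and selected helpers (map lookups, bpf_jiffies64,
 * bpf_get_func_arg, ...) are inlined.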
13904 */ 13905 static int do_misc_fixups(struct bpf_verifier_env *env) 13906 { 13907 struct bpf_prog *prog = env->prog; 13908 enum bpf_attach_type eatype = prog->expected_attach_type; 13909 enum bpf_prog_type prog_type = resolve_prog_type(prog); 13910 struct bpf_insn *insn = prog->insnsi; 13911 const struct bpf_func_proto *fn; 13912 const int insn_cnt = prog->len; 13913 const struct bpf_map_ops *ops; 13914 struct bpf_insn_aux_data *aux; 13915 struct bpf_insn insn_buf[16]; 13916 struct bpf_prog *new_prog; 13917 struct bpf_map *map_ptr; 13918 int i, ret, cnt, delta = 0; 13919 13920 for (i = 0; i < insn_cnt; i++, insn++) { 13921 /* Make divide-by-zero exceptions impossible. */ 13922 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 13923 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 13924 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 13925 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 13926 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 13927 bool isdiv = BPF_OP(insn->code) == BPF_DIV; 13928 struct bpf_insn *patchlet; 13929 struct bpf_insn chk_and_div[] = { 13930 /* [R,W]x div 0 -> 0 */ 13931 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 13932 BPF_JNE | BPF_K, insn->src_reg, 13933 0, 2, 0), 13934 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 13935 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 13936 *insn, 13937 }; 13938 struct bpf_insn chk_and_mod[] = { 13939 /* [R,W]x mod 0 -> [R,W]x */ 13940 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 13941 BPF_JEQ | BPF_K, insn->src_reg, 13942 0, 1 + (is64 ? 0 : 1), 0), 13943 *insn, 13944 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 13945 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), 13946 }; 13947 13948 patchlet = isdiv ? chk_and_div : chk_and_mod; 13949 cnt = isdiv ? ARRAY_SIZE(chk_and_div) : 13950 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); 13951 13952 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 13953 if (!new_prog) 13954 return -ENOMEM; 13955 13956 delta += cnt - 1; 13957 env->prog = prog = new_prog; 13958 insn = new_prog->insnsi + i + delta; 13959 continue; 13960 } 13961 13962 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */ 13963 if (BPF_CLASS(insn->code) == BPF_LD && 13964 (BPF_MODE(insn->code) == BPF_ABS || 13965 BPF_MODE(insn->code) == BPF_IND)) { 13966 cnt = env->ops->gen_ld_abs(insn, insn_buf); 13967 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 13968 verbose(env, "bpf verifier is misconfigured\n"); 13969 return -EINVAL; 13970 } 13971 13972 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 13973 if (!new_prog) 13974 return -ENOMEM; 13975 13976 delta += cnt - 1; 13977 env->prog = prog = new_prog; 13978 insn = new_prog->insnsi + i + delta; 13979 continue; 13980 } 13981 13982 /* Rewrite pointer arithmetic to mitigate speculation attacks. */ 13983 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 13984 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 13985 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 13986 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 13987 struct bpf_insn *patch = &insn_buf[0]; 13988 bool issrc, isneg, isimm; 13989 u32 off_reg; 13990 13991 aux = &env->insn_aux_data[i + delta]; 13992 if (!aux->alu_state || 13993 aux->alu_state == BPF_ALU_NON_POINTER) 13994 continue; 13995 13996 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 13997 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 13998 BPF_ALU_SANITIZE_SRC; 13999 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; 14000 14001 off_reg = issrc ? 
insn->src_reg : insn->dst_reg; 14002 if (isimm) { 14003 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 14004 } else { 14005 if (isneg) 14006 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 14007 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 14008 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 14009 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 14010 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 14011 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 14012 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); 14013 } 14014 if (!issrc) 14015 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); 14016 insn->src_reg = BPF_REG_AX; 14017 if (isneg) 14018 insn->code = insn->code == code_add ? 14019 code_sub : code_add; 14020 *patch++ = *insn; 14021 if (issrc && isneg && !isimm) 14022 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 14023 cnt = patch - insn_buf; 14024 14025 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 14026 if (!new_prog) 14027 return -ENOMEM; 14028 14029 delta += cnt - 1; 14030 env->prog = prog = new_prog; 14031 insn = new_prog->insnsi + i + delta; 14032 continue; 14033 } 14034 14035 if (insn->code != (BPF_JMP | BPF_CALL)) 14036 continue; 14037 if (insn->src_reg == BPF_PSEUDO_CALL) 14038 continue; 14039 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 14040 ret = fixup_kfunc_call(env, insn); 14041 if (ret) 14042 return ret; 14043 continue; 14044 } 14045 14046 if (insn->imm == BPF_FUNC_get_route_realm) 14047 prog->dst_needed = 1; 14048 if (insn->imm == BPF_FUNC_get_prandom_u32) 14049 bpf_user_rnd_init_once(); 14050 if (insn->imm == BPF_FUNC_override_return) 14051 prog->kprobe_override = 1; 14052 if (insn->imm == BPF_FUNC_tail_call) { 14053 /* If we tail call into other programs, we 14054 * cannot make any assumptions since they can 14055 * be replaced dynamically during runtime in 14056 * the program array. 
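 *
 * Roughly: when the map pointer and key are constants known to the
 * verifier and the program will be JITed, a poke descriptor is added
 * below so the JIT can later patch in a direct jump; otherwise the
 * generic, bounds-checked tail call path is kept.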
14057 */ 14058 prog->cb_access = 1; 14059 if (!allow_tail_call_in_subprogs(env)) 14060 prog->aux->stack_depth = MAX_BPF_STACK; 14061 prog->aux->max_pkt_offset = MAX_PACKET_OFF; 14062 14063 /* mark bpf_tail_call as different opcode to avoid 14064 * conditional branch in the interpreter for every normal 14065 * call and to prevent accidental JITing by JIT compiler 14066 * that doesn't support bpf_tail_call yet 14067 */ 14068 insn->imm = 0; 14069 insn->code = BPF_JMP | BPF_TAIL_CALL; 14070 14071 aux = &env->insn_aux_data[i + delta]; 14072 if (env->bpf_capable && !prog->blinding_requested && 14073 prog->jit_requested && 14074 !bpf_map_key_poisoned(aux) && 14075 !bpf_map_ptr_poisoned(aux) && 14076 !bpf_map_ptr_unpriv(aux)) { 14077 struct bpf_jit_poke_descriptor desc = { 14078 .reason = BPF_POKE_REASON_TAIL_CALL, 14079 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 14080 .tail_call.key = bpf_map_key_immediate(aux), 14081 .insn_idx = i + delta, 14082 }; 14083 14084 ret = bpf_jit_add_poke_descriptor(prog, &desc); 14085 if (ret < 0) { 14086 verbose(env, "adding tail call poke descriptor failed\n"); 14087 return ret; 14088 } 14089 14090 insn->imm = ret + 1; 14091 continue; 14092 } 14093 14094 if (!bpf_map_ptr_unpriv(aux)) 14095 continue; 14096 14097 /* instead of changing every JIT dealing with tail_call 14098 * emit two extra insns: 14099 * if (index >= max_entries) goto out; 14100 * index &= array->index_mask; 14101 * to avoid out-of-bounds cpu speculation 14102 */ 14103 if (bpf_map_ptr_poisoned(aux)) { 14104 verbose(env, "tail_call abusing map_ptr\n"); 14105 return -EINVAL; 14106 } 14107 14108 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 14109 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 14110 map_ptr->max_entries, 2); 14111 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 14112 container_of(map_ptr, 14113 struct bpf_array, 14114 map)->index_mask); 14115 insn_buf[2] = *insn; 14116 cnt = 3; 14117 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 14118 if (!new_prog) 14119 return -ENOMEM; 14120 14121 delta += cnt - 1; 14122 env->prog = prog = new_prog; 14123 insn = new_prog->insnsi + i + delta; 14124 continue; 14125 } 14126 14127 if (insn->imm == BPF_FUNC_timer_set_callback) { 14128 /* The verifier will process callback_fn as many times as necessary 14129 * with different maps and the register states prepared by 14130 * set_timer_callback_state will be accurate. 14131 * 14132 * The following use case is valid: 14133 * map1 is shared by prog1, prog2, prog3. 14134 * prog1 calls bpf_timer_init for some map1 elements 14135 * prog2 calls bpf_timer_set_callback for some map1 elements. 14136 * Those that were not bpf_timer_init-ed will return -EINVAL. 14137 * prog3 calls bpf_timer_start for some map1 elements. 14138 * Those that were not both bpf_timer_init-ed and 14139 * bpf_timer_set_callback-ed will return -EINVAL. 
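 *
 * The patch below only materializes prog->aux as a hidden third
 * argument for the helper, roughly:
 *   BPF_LD_IMM64(BPF_REG_3, (long)prog->aux)
 *   <original call insn>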
14140 */ 14141 struct bpf_insn ld_addrs[2] = { 14142 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), 14143 }; 14144 14145 insn_buf[0] = ld_addrs[0]; 14146 insn_buf[1] = ld_addrs[1]; 14147 insn_buf[2] = *insn; 14148 cnt = 3; 14149 14150 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 14151 if (!new_prog) 14152 return -ENOMEM; 14153 14154 delta += cnt - 1; 14155 env->prog = prog = new_prog; 14156 insn = new_prog->insnsi + i + delta; 14157 goto patch_call_imm; 14158 } 14159 14160 if (insn->imm == BPF_FUNC_task_storage_get || 14161 insn->imm == BPF_FUNC_sk_storage_get || 14162 insn->imm == BPF_FUNC_inode_storage_get) { 14163 if (env->prog->aux->sleepable) 14164 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); 14165 else 14166 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); 14167 insn_buf[1] = *insn; 14168 cnt = 2; 14169 14170 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 14171 if (!new_prog) 14172 return -ENOMEM; 14173 14174 delta += cnt - 1; 14175 env->prog = prog = new_prog; 14176 insn = new_prog->insnsi + i + delta; 14177 goto patch_call_imm; 14178 } 14179 14180 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 14181 * and other inlining handlers are currently limited to 64 bit 14182 * only. 14183 */ 14184 if (prog->jit_requested && BITS_PER_LONG == 64 && 14185 (insn->imm == BPF_FUNC_map_lookup_elem || 14186 insn->imm == BPF_FUNC_map_update_elem || 14187 insn->imm == BPF_FUNC_map_delete_elem || 14188 insn->imm == BPF_FUNC_map_push_elem || 14189 insn->imm == BPF_FUNC_map_pop_elem || 14190 insn->imm == BPF_FUNC_map_peek_elem || 14191 insn->imm == BPF_FUNC_redirect_map || 14192 insn->imm == BPF_FUNC_for_each_map_elem || 14193 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { 14194 aux = &env->insn_aux_data[i + delta]; 14195 if (bpf_map_ptr_poisoned(aux)) 14196 goto patch_call_imm; 14197 14198 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 14199 ops = map_ptr->ops; 14200 if (insn->imm == BPF_FUNC_map_lookup_elem && 14201 ops->map_gen_lookup) { 14202 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 14203 if (cnt == -EOPNOTSUPP) 14204 goto patch_map_ops_generic; 14205 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 14206 verbose(env, "bpf verifier is misconfigured\n"); 14207 return -EINVAL; 14208 } 14209 14210 new_prog = bpf_patch_insn_data(env, i + delta, 14211 insn_buf, cnt); 14212 if (!new_prog) 14213 return -ENOMEM; 14214 14215 delta += cnt - 1; 14216 env->prog = prog = new_prog; 14217 insn = new_prog->insnsi + i + delta; 14218 continue; 14219 } 14220 14221 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 14222 (void *(*)(struct bpf_map *map, void *key))NULL)); 14223 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 14224 (int (*)(struct bpf_map *map, void *key))NULL)); 14225 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 14226 (int (*)(struct bpf_map *map, void *key, void *value, 14227 u64 flags))NULL)); 14228 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 14229 (int (*)(struct bpf_map *map, void *value, 14230 u64 flags))NULL)); 14231 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 14232 (int (*)(struct bpf_map *map, void *value))NULL)); 14233 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 14234 (int (*)(struct bpf_map *map, void *value))NULL)); 14235 BUILD_BUG_ON(!__same_type(ops->map_redirect, 14236 (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL)); 14237 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, 14238 (int (*)(struct bpf_map *map, 14239 bpf_callback_t callback_fn, 14240 void *callback_ctx, 14241 u64 
flags))NULL)); 14242 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, 14243 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL)); 14244 14245 patch_map_ops_generic: 14246 switch (insn->imm) { 14247 case BPF_FUNC_map_lookup_elem: 14248 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); 14249 continue; 14250 case BPF_FUNC_map_update_elem: 14251 insn->imm = BPF_CALL_IMM(ops->map_update_elem); 14252 continue; 14253 case BPF_FUNC_map_delete_elem: 14254 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); 14255 continue; 14256 case BPF_FUNC_map_push_elem: 14257 insn->imm = BPF_CALL_IMM(ops->map_push_elem); 14258 continue; 14259 case BPF_FUNC_map_pop_elem: 14260 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); 14261 continue; 14262 case BPF_FUNC_map_peek_elem: 14263 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); 14264 continue; 14265 case BPF_FUNC_redirect_map: 14266 insn->imm = BPF_CALL_IMM(ops->map_redirect); 14267 continue; 14268 case BPF_FUNC_for_each_map_elem: 14269 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); 14270 continue; 14271 case BPF_FUNC_map_lookup_percpu_elem: 14272 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); 14273 continue; 14274 } 14275 14276 goto patch_call_imm; 14277 } 14278 14279 /* Implement bpf_jiffies64 inline. */ 14280 if (prog->jit_requested && BITS_PER_LONG == 64 && 14281 insn->imm == BPF_FUNC_jiffies64) { 14282 struct bpf_insn ld_jiffies_addr[2] = { 14283 BPF_LD_IMM64(BPF_REG_0, 14284 (unsigned long)&jiffies), 14285 }; 14286 14287 insn_buf[0] = ld_jiffies_addr[0]; 14288 insn_buf[1] = ld_jiffies_addr[1]; 14289 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 14290 BPF_REG_0, 0); 14291 cnt = 3; 14292 14293 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 14294 cnt); 14295 if (!new_prog) 14296 return -ENOMEM; 14297 14298 delta += cnt - 1; 14299 env->prog = prog = new_prog; 14300 insn = new_prog->insnsi + i + delta; 14301 continue; 14302 } 14303 14304 /* Implement bpf_get_func_arg inline. */ 14305 if (prog_type == BPF_PROG_TYPE_TRACING && 14306 insn->imm == BPF_FUNC_get_func_arg) { 14307 /* Load nr_args from ctx - 8 */ 14308 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 14309 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6); 14310 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3); 14311 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1); 14312 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0); 14313 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 14314 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0); 14315 insn_buf[7] = BPF_JMP_A(1); 14316 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); 14317 cnt = 9; 14318 14319 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 14320 if (!new_prog) 14321 return -ENOMEM; 14322 14323 delta += cnt - 1; 14324 env->prog = prog = new_prog; 14325 insn = new_prog->insnsi + i + delta; 14326 continue; 14327 } 14328 14329 /* Implement bpf_get_func_ret inline. 
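 * For BPF_TRACE_FEXIT and BPF_MODIFY_RETURN the return value is stored
 * by the trampoline right after the saved arguments in ctx, so it can
 * be fetched with plain loads; other attach types simply get a
 * constant -EOPNOTSUPP.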
*/ 14330 if (prog_type == BPF_PROG_TYPE_TRACING && 14331 insn->imm == BPF_FUNC_get_func_ret) { 14332 if (eatype == BPF_TRACE_FEXIT || 14333 eatype == BPF_MODIFY_RETURN) { 14334 /* Load nr_args from ctx - 8 */ 14335 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 14336 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); 14337 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); 14338 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 14339 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0); 14340 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0); 14341 cnt = 6; 14342 } else { 14343 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); 14344 cnt = 1; 14345 } 14346 14347 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 14348 if (!new_prog) 14349 return -ENOMEM; 14350 14351 delta += cnt - 1; 14352 env->prog = prog = new_prog; 14353 insn = new_prog->insnsi + i + delta; 14354 continue; 14355 } 14356 14357 /* Implement get_func_arg_cnt inline. */ 14358 if (prog_type == BPF_PROG_TYPE_TRACING && 14359 insn->imm == BPF_FUNC_get_func_arg_cnt) { 14360 /* Load nr_args from ctx - 8 */ 14361 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 14362 14363 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 14364 if (!new_prog) 14365 return -ENOMEM; 14366 14367 env->prog = prog = new_prog; 14368 insn = new_prog->insnsi + i + delta; 14369 continue; 14370 } 14371 14372 /* Implement bpf_get_func_ip inline. */ 14373 if (prog_type == BPF_PROG_TYPE_TRACING && 14374 insn->imm == BPF_FUNC_get_func_ip) { 14375 /* Load IP address from ctx - 16 */ 14376 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); 14377 14378 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 14379 if (!new_prog) 14380 return -ENOMEM; 14381 14382 env->prog = prog = new_prog; 14383 insn = new_prog->insnsi + i + delta; 14384 continue; 14385 } 14386 14387 patch_call_imm: 14388 fn = env->ops->get_func_proto(insn->imm, env->prog); 14389 /* all functions that have prototype and verifier allowed 14390 * programs to call them, must be real in-kernel functions 14391 */ 14392 if (!fn->func) { 14393 verbose(env, 14394 "kernel subsystem misconfigured func %s#%d\n", 14395 func_id_name(insn->imm), insn->imm); 14396 return -EFAULT; 14397 } 14398 insn->imm = fn->func - __bpf_call_base; 14399 } 14400 14401 /* Since poke tab is now finalized, publish aux to tracker. 
*/ 14402 for (i = 0; i < prog->aux->size_poke_tab; i++) { 14403 map_ptr = prog->aux->poke_tab[i].tail_call.map; 14404 if (!map_ptr->ops->map_poke_track || 14405 !map_ptr->ops->map_poke_untrack || 14406 !map_ptr->ops->map_poke_run) { 14407 verbose(env, "bpf verifier is misconfigured\n"); 14408 return -EINVAL; 14409 } 14410 14411 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 14412 if (ret < 0) { 14413 verbose(env, "tracking tail call prog failed\n"); 14414 return ret; 14415 } 14416 } 14417 14418 sort_kfunc_descs_by_imm(env->prog); 14419 14420 return 0; 14421 } 14422 14423 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, 14424 int position, 14425 s32 stack_base, 14426 u32 callback_subprogno, 14427 u32 *cnt) 14428 { 14429 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; 14430 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; 14431 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; 14432 int reg_loop_max = BPF_REG_6; 14433 int reg_loop_cnt = BPF_REG_7; 14434 int reg_loop_ctx = BPF_REG_8; 14435 14436 struct bpf_prog *new_prog; 14437 u32 callback_start; 14438 u32 call_insn_offset; 14439 s32 callback_offset; 14440 14441 /* This represents an inlined version of bpf_iter.c:bpf_loop, 14442 * be careful to modify this code in sync. 14443 */ 14444 struct bpf_insn insn_buf[] = { 14445 /* Return error and jump to the end of the patch if 14446 * expected number of iterations is too big. 14447 */ 14448 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2), 14449 BPF_MOV32_IMM(BPF_REG_0, -E2BIG), 14450 BPF_JMP_IMM(BPF_JA, 0, 0, 16), 14451 /* spill R6, R7, R8 to use these as loop vars */ 14452 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset), 14453 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset), 14454 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset), 14455 /* initialize loop vars */ 14456 BPF_MOV64_REG(reg_loop_max, BPF_REG_1), 14457 BPF_MOV32_IMM(reg_loop_cnt, 0), 14458 BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3), 14459 /* loop header, 14460 * if reg_loop_cnt >= reg_loop_max skip the loop body 14461 */ 14462 BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5), 14463 /* callback call, 14464 * correct callback offset would be set after patching 14465 */ 14466 BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt), 14467 BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx), 14468 BPF_CALL_REL(0), 14469 /* increment loop counter */ 14470 BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1), 14471 /* jump to loop header if callback returned 0 */ 14472 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6), 14473 /* return value of bpf_loop, 14474 * set R0 to the number of iterations 14475 */ 14476 BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt), 14477 /* restore original values of R6, R7, R8 */ 14478 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset), 14479 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset), 14480 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset), 14481 }; 14482 14483 *cnt = ARRAY_SIZE(insn_buf); 14484 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt); 14485 if (!new_prog) 14486 return new_prog; 14487 14488 /* callback start is known only after patching */ 14489 callback_start = env->subprog_info[callback_subprogno].start; 14490 /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */ 14491 call_insn_offset = position + 12; 14492 callback_offset = callback_start - call_insn_offset - 1; 14493 new_prog->insnsi[call_insn_offset].imm = callback_offset; 14494 14495 return new_prog; 14496 } 14497 14498 static bool is_bpf_loop_call(struct bpf_insn *insn) 14499 { 14500 return insn->code == (BPF_JMP | BPF_CALL) 
&& 14501 insn->src_reg == 0 && 14502 insn->imm == BPF_FUNC_loop; 14503 } 14504 14505 /* For all sub-programs in the program (including main) check 14506 * insn_aux_data to see if there are bpf_loop calls that require 14507 * inlining. If such calls are found the calls are replaced with a 14508 * sequence of instructions produced by `inline_bpf_loop` function and 14509 * subprog stack_depth is increased by the size of 3 registers. 14510 * This stack space is used to spill values of the R6, R7, R8. These 14511 * registers are used to store the loop bound, counter and context 14512 * variables. 14513 */ 14514 static int optimize_bpf_loop(struct bpf_verifier_env *env) 14515 { 14516 struct bpf_subprog_info *subprogs = env->subprog_info; 14517 int i, cur_subprog = 0, cnt, delta = 0; 14518 struct bpf_insn *insn = env->prog->insnsi; 14519 int insn_cnt = env->prog->len; 14520 u16 stack_depth = subprogs[cur_subprog].stack_depth; 14521 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 14522 u16 stack_depth_extra = 0; 14523 14524 for (i = 0; i < insn_cnt; i++, insn++) { 14525 struct bpf_loop_inline_state *inline_state = 14526 &env->insn_aux_data[i + delta].loop_inline_state; 14527 14528 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { 14529 struct bpf_prog *new_prog; 14530 14531 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; 14532 new_prog = inline_bpf_loop(env, 14533 i + delta, 14534 -(stack_depth + stack_depth_extra), 14535 inline_state->callback_subprogno, 14536 &cnt); 14537 if (!new_prog) 14538 return -ENOMEM; 14539 14540 delta += cnt - 1; 14541 env->prog = new_prog; 14542 insn = new_prog->insnsi + i + delta; 14543 } 14544 14545 if (subprogs[cur_subprog + 1].start == i + delta + 1) { 14546 subprogs[cur_subprog].stack_depth += stack_depth_extra; 14547 cur_subprog++; 14548 stack_depth = subprogs[cur_subprog].stack_depth; 14549 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 14550 stack_depth_extra = 0; 14551 } 14552 } 14553 14554 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 14555 14556 return 0; 14557 } 14558 14559 static void free_states(struct bpf_verifier_env *env) 14560 { 14561 struct bpf_verifier_state_list *sl, *sln; 14562 int i; 14563 14564 sl = env->free_list; 14565 while (sl) { 14566 sln = sl->next; 14567 free_verifier_state(&sl->state, false); 14568 kfree(sl); 14569 sl = sln; 14570 } 14571 env->free_list = NULL; 14572 14573 if (!env->explored_states) 14574 return; 14575 14576 for (i = 0; i < state_htab_size(env); i++) { 14577 sl = env->explored_states[i]; 14578 14579 while (sl) { 14580 sln = sl->next; 14581 free_verifier_state(&sl->state, false); 14582 kfree(sl); 14583 sl = sln; 14584 } 14585 env->explored_states[i] = NULL; 14586 } 14587 } 14588 14589 static int do_check_common(struct bpf_verifier_env *env, int subprog) 14590 { 14591 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 14592 struct bpf_verifier_state *state; 14593 struct bpf_reg_state *regs; 14594 int ret, i; 14595 14596 env->prev_linfo = NULL; 14597 env->pass_cnt++; 14598 14599 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 14600 if (!state) 14601 return -ENOMEM; 14602 state->curframe = 0; 14603 state->speculative = false; 14604 state->branches = 1; 14605 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 14606 if (!state->frame[0]) { 14607 kfree(state); 14608 return -ENOMEM; 14609 } 14610 env->cur_state = state; 14611 init_func_state(env, state->frame[0], 14612 BPF_MAIN_FUNC /* callsite */, 14613 0 /* frameno */, 
14614 subprog); 14615 14616 regs = state->frame[state->curframe]->regs; 14617 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 14618 ret = btf_prepare_func_args(env, subprog, regs); 14619 if (ret) 14620 goto out; 14621 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 14622 if (regs[i].type == PTR_TO_CTX) 14623 mark_reg_known_zero(env, regs, i); 14624 else if (regs[i].type == SCALAR_VALUE) 14625 mark_reg_unknown(env, regs, i); 14626 else if (base_type(regs[i].type) == PTR_TO_MEM) { 14627 const u32 mem_size = regs[i].mem_size; 14628 14629 mark_reg_known_zero(env, regs, i); 14630 regs[i].mem_size = mem_size; 14631 regs[i].id = ++env->id_gen; 14632 } 14633 } 14634 } else { 14635 /* 1st arg to a function */ 14636 regs[BPF_REG_1].type = PTR_TO_CTX; 14637 mark_reg_known_zero(env, regs, BPF_REG_1); 14638 ret = btf_check_subprog_arg_match(env, subprog, regs); 14639 if (ret == -EFAULT) 14640 /* unlikely verifier bug; abort. 14641 * ret == 0 and ret < 0 are sadly acceptable for 14642 * the main() function due to backward compatibility. 14643 * E.g. a socket filter program may be written as: 14644 * int bpf_prog(struct pt_regs *ctx) 14645 * and never dereference that ctx in the program. 14646 * 'struct pt_regs' is a type mismatch for a socket 14647 * filter, which should be using 'struct __sk_buff'. 14648 */ 14649 goto out; 14650 } 14651 14652 ret = do_check(env); 14653 out: 14654 /* The check for NULL is necessary, since cur_state can be freed inside 14655 * do_check() under memory pressure. 14656 */ 14657 if (env->cur_state) { 14658 free_verifier_state(env->cur_state, true); 14659 env->cur_state = NULL; 14660 } 14661 while (!pop_stack(env, NULL, NULL, false)); 14662 if (!ret && pop_log) 14663 bpf_vlog_reset(&env->log, 0); 14664 free_states(env); 14665 return ret; 14666 } 14667 14668 /* Verify all global functions in a BPF program one by one based on their BTF. 14669 * All global functions must pass verification. Otherwise the whole program is rejected. 14670 * Consider: 14671 * int bar(int); 14672 * int foo(int f) 14673 * { 14674 * return bar(f); 14675 * } 14676 * int bar(int b) 14677 * { 14678 * ... 14679 * } 14680 * foo() will be verified first for R1=any_scalar_value. During verification it 14681 * will be assumed that bar() has already been verified successfully and the call 14682 * to bar() from foo() will be checked for type match only. Later bar() will be verified 14683 * independently to check that it's safe for R1=any_scalar_value.
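 * In other words, each global function must be safe for any values of its
 * scalar arguments; it may not rely on invariants that a particular caller
 * happens to establish.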
14684 */ 14685 static int do_check_subprogs(struct bpf_verifier_env *env) 14686 { 14687 struct bpf_prog_aux *aux = env->prog->aux; 14688 int i, ret; 14689 14690 if (!aux->func_info) 14691 return 0; 14692 14693 for (i = 1; i < env->subprog_cnt; i++) { 14694 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 14695 continue; 14696 env->insn_idx = env->subprog_info[i].start; 14697 WARN_ON_ONCE(env->insn_idx == 0); 14698 ret = do_check_common(env, i); 14699 if (ret) { 14700 return ret; 14701 } else if (env->log.level & BPF_LOG_LEVEL) { 14702 verbose(env, 14703 "Func#%d is safe for any args that match its prototype\n", 14704 i); 14705 } 14706 } 14707 return 0; 14708 } 14709 14710 static int do_check_main(struct bpf_verifier_env *env) 14711 { 14712 int ret; 14713 14714 env->insn_idx = 0; 14715 ret = do_check_common(env, 0); 14716 if (!ret) 14717 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 14718 return ret; 14719 } 14720 14721 14722 static void print_verification_stats(struct bpf_verifier_env *env) 14723 { 14724 int i; 14725 14726 if (env->log.level & BPF_LOG_STATS) { 14727 verbose(env, "verification time %lld usec\n", 14728 div_u64(env->verification_time, 1000)); 14729 verbose(env, "stack depth "); 14730 for (i = 0; i < env->subprog_cnt; i++) { 14731 u32 depth = env->subprog_info[i].stack_depth; 14732 14733 verbose(env, "%d", depth); 14734 if (i + 1 < env->subprog_cnt) 14735 verbose(env, "+"); 14736 } 14737 verbose(env, "\n"); 14738 } 14739 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 14740 "total_states %d peak_states %d mark_read %d\n", 14741 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 14742 env->max_states_per_insn, env->total_states, 14743 env->peak_states, env->longest_mark_read_walk); 14744 } 14745 14746 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 14747 { 14748 const struct btf_type *t, *func_proto; 14749 const struct bpf_struct_ops *st_ops; 14750 const struct btf_member *member; 14751 struct bpf_prog *prog = env->prog; 14752 u32 btf_id, member_idx; 14753 const char *mname; 14754 14755 if (!prog->gpl_compatible) { 14756 verbose(env, "struct ops programs must have a GPL compatible license\n"); 14757 return -EINVAL; 14758 } 14759 14760 btf_id = prog->aux->attach_btf_id; 14761 st_ops = bpf_struct_ops_find(btf_id); 14762 if (!st_ops) { 14763 verbose(env, "attach_btf_id %u is not a supported struct\n", 14764 btf_id); 14765 return -ENOTSUPP; 14766 } 14767 14768 t = st_ops->type; 14769 member_idx = prog->expected_attach_type; 14770 if (member_idx >= btf_type_vlen(t)) { 14771 verbose(env, "attach to invalid member idx %u of struct %s\n", 14772 member_idx, st_ops->name); 14773 return -EINVAL; 14774 } 14775 14776 member = &btf_type_member(t)[member_idx]; 14777 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 14778 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 14779 NULL); 14780 if (!func_proto) { 14781 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 14782 mname, member_idx, st_ops->name); 14783 return -EINVAL; 14784 } 14785 14786 if (st_ops->check_member) { 14787 int err = st_ops->check_member(t, member); 14788 14789 if (err) { 14790 verbose(env, "attach to unsupported member %s of struct %s\n", 14791 mname, st_ops->name); 14792 return err; 14793 } 14794 } 14795 14796 prog->aux->attach_func_proto = func_proto; 14797 prog->aux->attach_func_name = mname; 14798 env->ops = st_ops->verifier_ops; 14799 14800 return 0; 14801 } 14802 #define SECURITY_PREFIX "security_" 14803 14804 static 
int check_attach_modify_return(unsigned long addr, const char *func_name) 14805 { 14806 if (within_error_injection_list(addr) || 14807 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 14808 return 0; 14809 14810 return -EINVAL; 14811 } 14812 14813 /* list of non-sleepable functions that are otherwise on 14814 * ALLOW_ERROR_INJECTION list 14815 */ 14816 BTF_SET_START(btf_non_sleepable_error_inject) 14817 /* Three functions below can be called from sleepable and non-sleepable context. 14818 * Assume non-sleepable from bpf safety point of view. 14819 */ 14820 BTF_ID(func, __filemap_add_folio) 14821 BTF_ID(func, should_fail_alloc_page) 14822 BTF_ID(func, should_failslab) 14823 BTF_SET_END(btf_non_sleepable_error_inject) 14824 14825 static int check_non_sleepable_error_inject(u32 btf_id) 14826 { 14827 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 14828 } 14829 14830 int bpf_check_attach_target(struct bpf_verifier_log *log, 14831 const struct bpf_prog *prog, 14832 const struct bpf_prog *tgt_prog, 14833 u32 btf_id, 14834 struct bpf_attach_target_info *tgt_info) 14835 { 14836 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 14837 const char prefix[] = "btf_trace_"; 14838 int ret = 0, subprog = -1, i; 14839 const struct btf_type *t; 14840 bool conservative = true; 14841 const char *tname; 14842 struct btf *btf; 14843 long addr = 0; 14844 14845 if (!btf_id) { 14846 bpf_log(log, "Tracing programs must provide btf_id\n"); 14847 return -EINVAL; 14848 } 14849 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 14850 if (!btf) { 14851 bpf_log(log, 14852 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 14853 return -EINVAL; 14854 } 14855 t = btf_type_by_id(btf, btf_id); 14856 if (!t) { 14857 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 14858 return -EINVAL; 14859 } 14860 tname = btf_name_by_offset(btf, t->name_off); 14861 if (!tname) { 14862 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 14863 return -EINVAL; 14864 } 14865 if (tgt_prog) { 14866 struct bpf_prog_aux *aux = tgt_prog->aux; 14867 14868 for (i = 0; i < aux->func_info_cnt; i++) 14869 if (aux->func_info[i].type_id == btf_id) { 14870 subprog = i; 14871 break; 14872 } 14873 if (subprog == -1) { 14874 bpf_log(log, "Subprog %s doesn't exist\n", tname); 14875 return -EINVAL; 14876 } 14877 conservative = aux->func_info_aux[subprog].unreliable; 14878 if (prog_extension) { 14879 if (conservative) { 14880 bpf_log(log, 14881 "Cannot replace static functions\n"); 14882 return -EINVAL; 14883 } 14884 if (!prog->jit_requested) { 14885 bpf_log(log, 14886 "Extension programs should be JITed\n"); 14887 return -EINVAL; 14888 } 14889 } 14890 if (!tgt_prog->jited) { 14891 bpf_log(log, "Can attach to only JITed progs\n"); 14892 return -EINVAL; 14893 } 14894 if (tgt_prog->type == prog->type) { 14895 /* Cannot fentry/fexit another fentry/fexit program. 14896 * Cannot attach program extension to another extension. 14897 * It's ok to attach fentry/fexit to extension program. 14898 */ 14899 bpf_log(log, "Cannot recursively attach\n"); 14900 return -EINVAL; 14901 } 14902 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 14903 prog_extension && 14904 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 14905 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 14906 /* Program extensions can extend all program types 14907 * except fentry/fexit. The reason is the following. 
14908 * The fentry/fexit programs are used for performance 14909 * analysis, stats and can be attached to any program 14910 * type except themselves. When extension program is 14911 * replacing XDP function it is necessary to allow 14912 * performance analysis of all functions. Both original 14913 * XDP program and its program extension. Hence 14914 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 14915 * allowed. If extending of fentry/fexit was allowed it 14916 * would be possible to create long call chain 14917 * fentry->extension->fentry->extension beyond 14918 * reasonable stack size. Hence extending fentry is not 14919 * allowed. 14920 */ 14921 bpf_log(log, "Cannot extend fentry/fexit\n"); 14922 return -EINVAL; 14923 } 14924 } else { 14925 if (prog_extension) { 14926 bpf_log(log, "Cannot replace kernel functions\n"); 14927 return -EINVAL; 14928 } 14929 } 14930 14931 switch (prog->expected_attach_type) { 14932 case BPF_TRACE_RAW_TP: 14933 if (tgt_prog) { 14934 bpf_log(log, 14935 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 14936 return -EINVAL; 14937 } 14938 if (!btf_type_is_typedef(t)) { 14939 bpf_log(log, "attach_btf_id %u is not a typedef\n", 14940 btf_id); 14941 return -EINVAL; 14942 } 14943 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 14944 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", 14945 btf_id, tname); 14946 return -EINVAL; 14947 } 14948 tname += sizeof(prefix) - 1; 14949 t = btf_type_by_id(btf, t->type); 14950 if (!btf_type_is_ptr(t)) 14951 /* should never happen in valid vmlinux build */ 14952 return -EINVAL; 14953 t = btf_type_by_id(btf, t->type); 14954 if (!btf_type_is_func_proto(t)) 14955 /* should never happen in valid vmlinux build */ 14956 return -EINVAL; 14957 14958 break; 14959 case BPF_TRACE_ITER: 14960 if (!btf_type_is_func(t)) { 14961 bpf_log(log, "attach_btf_id %u is not a function\n", 14962 btf_id); 14963 return -EINVAL; 14964 } 14965 t = btf_type_by_id(btf, t->type); 14966 if (!btf_type_is_func_proto(t)) 14967 return -EINVAL; 14968 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 14969 if (ret) 14970 return ret; 14971 break; 14972 default: 14973 if (!prog_extension) 14974 return -EINVAL; 14975 fallthrough; 14976 case BPF_MODIFY_RETURN: 14977 case BPF_LSM_MAC: 14978 case BPF_LSM_CGROUP: 14979 case BPF_TRACE_FENTRY: 14980 case BPF_TRACE_FEXIT: 14981 if (!btf_type_is_func(t)) { 14982 bpf_log(log, "attach_btf_id %u is not a function\n", 14983 btf_id); 14984 return -EINVAL; 14985 } 14986 if (prog_extension && 14987 btf_check_type_match(log, prog, btf, t)) 14988 return -EINVAL; 14989 t = btf_type_by_id(btf, t->type); 14990 if (!btf_type_is_func_proto(t)) 14991 return -EINVAL; 14992 14993 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && 14994 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || 14995 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) 14996 return -EINVAL; 14997 14998 if (tgt_prog && conservative) 14999 t = NULL; 15000 15001 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 15002 if (ret < 0) 15003 return ret; 15004 15005 if (tgt_prog) { 15006 if (subprog == 0) 15007 addr = (long) tgt_prog->bpf_func; 15008 else 15009 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 15010 } else { 15011 addr = kallsyms_lookup_name(tname); 15012 if (!addr) { 15013 bpf_log(log, 15014 "The address of function %s cannot be found\n", 15015 tname); 15016 return -ENOENT; 15017 } 15018 } 15019 15020 if (prog->aux->sleepable) { 15021 
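		/* Sleepable programs may only attach to targets where sleeping
		 * is known to be safe; which targets qualify depends on the
		 * program type, as checked below.
		 */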
ret = -EINVAL; 15022 switch (prog->type) { 15023 case BPF_PROG_TYPE_TRACING: 15024 /* fentry/fexit/fmod_ret progs can be sleepable only if they are 15025 * attached to ALLOW_ERROR_INJECTION and are not in denylist. 15026 */ 15027 if (!check_non_sleepable_error_inject(btf_id) && 15028 within_error_injection_list(addr)) 15029 ret = 0; 15030 break; 15031 case BPF_PROG_TYPE_LSM: 15032 /* LSM progs check that they are attached to bpf_lsm_*() funcs. 15033 * Only some of them are sleepable. 15034 */ 15035 if (bpf_lsm_is_sleepable_hook(btf_id)) 15036 ret = 0; 15037 break; 15038 default: 15039 break; 15040 } 15041 if (ret) { 15042 bpf_log(log, "%s is not sleepable\n", tname); 15043 return ret; 15044 } 15045 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 15046 if (tgt_prog) { 15047 bpf_log(log, "can't modify return codes of BPF programs\n"); 15048 return -EINVAL; 15049 } 15050 ret = check_attach_modify_return(addr, tname); 15051 if (ret) { 15052 bpf_log(log, "%s() is not modifiable\n", tname); 15053 return ret; 15054 } 15055 } 15056 15057 break; 15058 } 15059 tgt_info->tgt_addr = addr; 15060 tgt_info->tgt_name = tname; 15061 tgt_info->tgt_type = t; 15062 return 0; 15063 } 15064 15065 BTF_SET_START(btf_id_deny) 15066 BTF_ID_UNUSED 15067 #ifdef CONFIG_SMP 15068 BTF_ID(func, migrate_disable) 15069 BTF_ID(func, migrate_enable) 15070 #endif 15071 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU 15072 BTF_ID(func, rcu_read_unlock_strict) 15073 #endif 15074 BTF_SET_END(btf_id_deny) 15075 15076 static int check_attach_btf_id(struct bpf_verifier_env *env) 15077 { 15078 struct bpf_prog *prog = env->prog; 15079 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 15080 struct bpf_attach_target_info tgt_info = {}; 15081 u32 btf_id = prog->aux->attach_btf_id; 15082 struct bpf_trampoline *tr; 15083 int ret; 15084 u64 key; 15085 15086 if (prog->type == BPF_PROG_TYPE_SYSCALL) { 15087 if (prog->aux->sleepable) 15088 /* attach_btf_id checked to be zero already */ 15089 return 0; 15090 verbose(env, "Syscall programs can only be sleepable\n"); 15091 return -EINVAL; 15092 } 15093 15094 if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && 15095 prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) { 15096 verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n"); 15097 return -EINVAL; 15098 } 15099 15100 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 15101 return check_struct_ops_btf_id(env); 15102 15103 if (prog->type != BPF_PROG_TYPE_TRACING && 15104 prog->type != BPF_PROG_TYPE_LSM && 15105 prog->type != BPF_PROG_TYPE_EXT) 15106 return 0; 15107 15108 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); 15109 if (ret) 15110 return ret; 15111 15112 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { 15113 /* to make freplace equivalent to their targets, they need to 15114 * inherit env->ops and expected_attach_type for the rest of the 15115 * verification 15116 */ 15117 env->ops = bpf_verifier_ops[tgt_prog->type]; 15118 prog->expected_attach_type = tgt_prog->expected_attach_type; 15119 } 15120 15121 /* store info about the attachment target that will be used later */ 15122 prog->aux->attach_func_proto = tgt_info.tgt_type; 15123 prog->aux->attach_func_name = tgt_info.tgt_name; 15124 15125 if (tgt_prog) { 15126 prog->aux->saved_dst_prog_type = tgt_prog->type; 15127 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; 15128 } 15129 15130 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { 15131 
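		/* Raw tracepoint programs attach to the tracepoint directly and
		 * do not need a trampoline, so we return before the trampoline
		 * is created below.
		 */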
prog->aux->attach_btf_trace = true; 15132 return 0; 15133 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { 15134 if (!bpf_iter_prog_supported(prog)) 15135 return -EINVAL; 15136 return 0; 15137 } 15138 15139 if (prog->type == BPF_PROG_TYPE_LSM) { 15140 ret = bpf_lsm_verify_prog(&env->log, prog); 15141 if (ret < 0) 15142 return ret; 15143 } else if (prog->type == BPF_PROG_TYPE_TRACING && 15144 btf_id_set_contains(&btf_id_deny, btf_id)) { 15145 return -EINVAL; 15146 } 15147 15148 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); 15149 tr = bpf_trampoline_get(key, &tgt_info); 15150 if (!tr) 15151 return -ENOMEM; 15152 15153 prog->aux->dst_trampoline = tr; 15154 return 0; 15155 } 15156 15157 struct btf *bpf_get_btf_vmlinux(void) 15158 { 15159 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 15160 mutex_lock(&bpf_verifier_lock); 15161 if (!btf_vmlinux) 15162 btf_vmlinux = btf_parse_vmlinux(); 15163 mutex_unlock(&bpf_verifier_lock); 15164 } 15165 return btf_vmlinux; 15166 } 15167 15168 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) 15169 { 15170 u64 start_time = ktime_get_ns(); 15171 struct bpf_verifier_env *env; 15172 struct bpf_verifier_log *log; 15173 int i, len, ret = -EINVAL; 15174 bool is_priv; 15175 15176 /* if there are no verifier ops, no program type is valid */ 15177 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 15178 return -EINVAL; 15179 15180 /* 'struct bpf_verifier_env' can be global, but since it's not small, 15181 * allocate/free it every time bpf_check() is called 15182 */ 15183 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 15184 if (!env) 15185 return -ENOMEM; 15186 log = &env->log; 15187 15188 len = (*prog)->len; 15189 env->insn_aux_data = 15190 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 15191 ret = -ENOMEM; 15192 if (!env->insn_aux_data) 15193 goto err_free_env; 15194 for (i = 0; i < len; i++) 15195 env->insn_aux_data[i].orig_idx = i; 15196 env->prog = *prog; 15197 env->ops = bpf_verifier_ops[env->prog->type]; 15198 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); 15199 is_priv = bpf_capable(); 15200 15201 bpf_get_btf_vmlinux(); 15202 15203 /* grab the mutex to protect a few globals used by the verifier */ 15204 if (!is_priv) 15205 mutex_lock(&bpf_verifier_lock); 15206 15207 if (attr->log_level || attr->log_buf || attr->log_size) { 15208 /* user requested verbose verifier output 15209 * and supplied buffer to store the verification trace 15210 */ 15211 log->level = attr->log_level; 15212 log->ubuf = (char __user *) (unsigned long) attr->log_buf; 15213 log->len_total = attr->log_size; 15214 15215 /* log attributes have to be sane */ 15216 if (!bpf_verifier_log_attr_valid(log)) { 15217 ret = -EINVAL; 15218 goto err_unlock; 15219 } 15220 } 15221 15222 mark_verifier_state_clean(env); 15223 15224 if (IS_ERR(btf_vmlinux)) { 15225 /* Either gcc or pahole or the kernel is broken.
*/ 15226 verbose(env, "in-kernel BTF is malformed\n"); 15227 ret = PTR_ERR(btf_vmlinux); 15228 goto skip_full_check; 15229 } 15230 15231 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 15232 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 15233 env->strict_alignment = true; 15234 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 15235 env->strict_alignment = false; 15236 15237 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); 15238 env->allow_uninit_stack = bpf_allow_uninit_stack(); 15239 env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); 15240 env->bypass_spec_v1 = bpf_bypass_spec_v1(); 15241 env->bypass_spec_v4 = bpf_bypass_spec_v4(); 15242 env->bpf_capable = bpf_capable(); 15243 15244 if (is_priv) 15245 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 15246 15247 env->explored_states = kvcalloc(state_htab_size(env), 15248 sizeof(struct bpf_verifier_state_list *), 15249 GFP_USER); 15250 ret = -ENOMEM; 15251 if (!env->explored_states) 15252 goto skip_full_check; 15253 15254 ret = add_subprog_and_kfunc(env); 15255 if (ret < 0) 15256 goto skip_full_check; 15257 15258 ret = check_subprogs(env); 15259 if (ret < 0) 15260 goto skip_full_check; 15261 15262 ret = check_btf_info(env, attr, uattr); 15263 if (ret < 0) 15264 goto skip_full_check; 15265 15266 ret = check_attach_btf_id(env); 15267 if (ret) 15268 goto skip_full_check; 15269 15270 ret = resolve_pseudo_ldimm64(env); 15271 if (ret < 0) 15272 goto skip_full_check; 15273 15274 if (bpf_prog_is_dev_bound(env->prog->aux)) { 15275 ret = bpf_prog_offload_verifier_prep(env->prog); 15276 if (ret) 15277 goto skip_full_check; 15278 } 15279 15280 ret = check_cfg(env); 15281 if (ret < 0) 15282 goto skip_full_check; 15283 15284 ret = do_check_subprogs(env); 15285 ret = ret ?: do_check_main(env); 15286 15287 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) 15288 ret = bpf_prog_offload_finalize(env); 15289 15290 skip_full_check: 15291 kvfree(env->explored_states); 15292 15293 if (ret == 0) 15294 ret = check_max_stack_depth(env); 15295 15296 /* instruction rewrites happen after this point */ 15297 if (ret == 0) 15298 ret = optimize_bpf_loop(env); 15299 15300 if (is_priv) { 15301 if (ret == 0) 15302 opt_hard_wire_dead_code_branches(env); 15303 if (ret == 0) 15304 ret = opt_remove_dead_code(env); 15305 if (ret == 0) 15306 ret = opt_remove_nops(env); 15307 } else { 15308 if (ret == 0) 15309 sanitize_dead_code(env); 15310 } 15311 15312 if (ret == 0) 15313 /* program is valid, convert *(u32*)(ctx + off) accesses */ 15314 ret = convert_ctx_accesses(env); 15315 15316 if (ret == 0) 15317 ret = do_misc_fixups(env); 15318 15319 /* do the 32-bit optimization after insn patching is done so the patched 15320 * insns can be handled correctly. 15321 */ 15322 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { 15323 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 15324 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ?
!ret 15325 : false; 15326 } 15327 15328 if (ret == 0) 15329 ret = fixup_call_args(env); 15330 15331 env->verification_time = ktime_get_ns() - start_time; 15332 print_verification_stats(env); 15333 env->prog->aux->verified_insns = env->insn_processed; 15334 15335 if (log->level && bpf_verifier_log_full(log)) 15336 ret = -ENOSPC; 15337 if (log->level && !log->ubuf) { 15338 ret = -EFAULT; 15339 goto err_release_maps; 15340 } 15341 15342 if (ret) 15343 goto err_release_maps; 15344 15345 if (env->used_map_cnt) { 15346 /* if program passed verifier, update used_maps in bpf_prog_info */ 15347 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 15348 sizeof(env->used_maps[0]), 15349 GFP_KERNEL); 15350 15351 if (!env->prog->aux->used_maps) { 15352 ret = -ENOMEM; 15353 goto err_release_maps; 15354 } 15355 15356 memcpy(env->prog->aux->used_maps, env->used_maps, 15357 sizeof(env->used_maps[0]) * env->used_map_cnt); 15358 env->prog->aux->used_map_cnt = env->used_map_cnt; 15359 } 15360 if (env->used_btf_cnt) { 15361 /* if program passed verifier, update used_btfs in bpf_prog_aux */ 15362 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, 15363 sizeof(env->used_btfs[0]), 15364 GFP_KERNEL); 15365 if (!env->prog->aux->used_btfs) { 15366 ret = -ENOMEM; 15367 goto err_release_maps; 15368 } 15369 15370 memcpy(env->prog->aux->used_btfs, env->used_btfs, 15371 sizeof(env->used_btfs[0]) * env->used_btf_cnt); 15372 env->prog->aux->used_btf_cnt = env->used_btf_cnt; 15373 } 15374 if (env->used_map_cnt || env->used_btf_cnt) { 15375 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 15376 * bpf_ld_imm64 instructions 15377 */ 15378 convert_pseudo_ld_imm64(env); 15379 } 15380 15381 adjust_btf_func(env); 15382 15383 err_release_maps: 15384 if (!env->prog->aux->used_maps) 15385 /* if we didn't copy map pointers into bpf_prog_info, release 15386 * them now. Otherwise free_used_maps() will release them. 15387 */ 15388 release_maps(env); 15389 if (!env->prog->aux->used_btfs) 15390 release_btfs(env); 15391 15392 /* extension progs temporarily inherit the attach_type of their targets 15393 for verification purposes, so set it back to zero before returning 15394 */ 15395 if (env->prog->type == BPF_PROG_TYPE_EXT) 15396 env->prog->expected_attach_type = 0; 15397 15398 *prog = env->prog; 15399 err_unlock: 15400 if (!is_priv) 15401 mutex_unlock(&bpf_verifier_lock); 15402 vfree(env->insn_aux_data); 15403 err_free_env: 15404 kfree(env); 15405 return ret; 15406 } 15407