// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
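
/* Editor's illustration (not part of the original file): the NULL check
 * described above, spelled out with the same insn macros this comment
 * already uses, might look like:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),   // NULL check
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),    // R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * In the fall-through branch the verifier upgrades R0 from
 * PTR_TO_MAP_VALUE_OR_NULL to PTR_TO_MAP_VALUE; in the taken branch R0 is
 * known to be zero and must not be dereferenced.
 */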

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
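
/* Editor's note (illustration, not from the original file): the helpers
 * above pack a map pointer and an 'unpriv' flag into one unsigned long,
 * relying on pointer alignment to keep bit 0 free:
 *
 *    bpf_map_ptr_store(aux, map, true);
 *    map == BPF_MAP_PTR(aux->map_ptr_state);   // pointer recovered
 *    bpf_map_ptr_unpriv(aux);                  // true, from bit 0
 *
 * bpf_map_key_store() similarly reserves the two top bits of the tracked
 * key (BPF_MAP_KEY_SEEN/BPF_MAP_KEY_POISON), leaving the low 62 bits for
 * bpf_map_key_immediate().
 */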

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

static bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_MAP_VALUE ||
	       type == PTR_TO_MAP_KEY ||
	       type == PTR_TO_SOCK_COMMON;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_BTF_ID_OR_NULL ||
	       type == PTR_TO_MEM_OR_NULL ||
	       type == PTR_TO_RDONLY_BUF_OR_NULL ||
	       type == PTR_TO_RDWR_BUF_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
	       map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_MEM ||
	       type == PTR_TO_MEM_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

static bool arg_type_may_be_null(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
	       type == ARG_PTR_TO_MEM_OR_NULL ||
	       type == ARG_PTR_TO_CTX_OR_NULL ||
	       type == ARG_PTR_TO_SOCKET_OR_NULL ||
	       type == ARG_PTR_TO_ALLOC_MEM_OR_NULL ||
	       type == ARG_PTR_TO_STACK_OR_NULL;
}
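
/* Editor's note (illustration): each *_OR_NULL type above pairs with a
 * non-NULL counterpart; once a NULL check is seen, mark_ptr_not_null_reg()
 * (further below) rewrites one into the other, e.g.
 *
 *    PTR_TO_SOCKET_OR_NULL     ->  PTR_TO_SOCKET
 *    PTR_TO_MEM_OR_NULL        ->  PTR_TO_MEM
 *    PTR_TO_MAP_VALUE_OR_NULL  ->  PTR_TO_MAP_VALUE
 */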

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release ||
	       func_id == BPF_FUNC_ringbuf_submit ||
	       func_id == BPF_FUNC_ringbuf_discard;
}

static bool may_be_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
	       func_id == BPF_FUNC_sk_lookup_udp ||
	       func_id == BPF_FUNC_skc_lookup_tcp ||
	       func_id == BPF_FUNC_map_lookup_elem ||
	       func_id == BPF_FUNC_ringbuf_reserve;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
	       func_id == BPF_FUNC_sk_fullsock ||
	       func_id == BPF_FUNC_skc_to_tcp_sock ||
	       func_id == BPF_FUNC_skc_to_tcp6_sock ||
	       func_id == BPF_FUNC_skc_to_udp6_sock ||
	       func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
	       func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
	[PTR_TO_PERCPU_BTF_ID]	= "percpu_ptr_",
	[PTR_TO_MEM]		= "mem",
	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
	[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
	[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
	[PTR_TO_RDWR_BUF]	= "rdwr_buf",
	[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
	[PTR_TO_FUNC]		= "func",
	[PTR_TO_MAP_KEY]	= "map_key",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static const char *kernel_type_name(const struct btf* btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID ||
			    t == PTR_TO_BTF_ID_OR_NULL ||
			    t == PTR_TO_PERCPU_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_KEY ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose(env, ",s32_min_value=%d",
						(int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose(env, ",s32_max_value=%d",
						(int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose(env, ",u32_min_value=%d",
						(int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose(env, ",u32_max_value=%d",
						(int)(reg->u32_max_value));
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}

/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (ZERO_OR_NULL_PTR(src))
		goto out;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	if (ksize(dst) < bytes) {
		kfree(dst);
		dst = kmalloc_track_caller(bytes, flags);
		if (!dst)
			return NULL;
	}

	memcpy(dst, src, bytes);
out:
	return dst ? dst : ZERO_SIZE_PTR;
}

/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	if (!new_n || old_n == new_n)
		goto out;

	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
	if (!arr)
		return NULL;

	if (new_n > old_n)
		memset(arr + old_n * size, 0, (new_n - old_n) * size);

out:
	return arr ? arr : ZERO_SIZE_PTR;
}
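
/* Editor's sketch (illustration, assuming GFP_KERNEL context) of how the
 * two helpers above are meant to be used:
 *
 *    u32 *dst = NULL;
 *    dst = copy_array(dst, src, n, sizeof(u32), GFP_KERNEL);
 *    dst = realloc_array(dst, n, 2 * n, sizeof(u32)); // new tail zeroed
 *
 * Both return ZERO_SIZE_PTR rather than NULL for empty input, so callers
 * only need to check for NULL to detect allocation failure.
 */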

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->acquired_refs = src->acquired_refs;
	return 0;
}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}

static int grow_stack_state(struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;
	return 0;
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}
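
/* Editor's sketch (illustration): the reference lifecycle the verifier
 * tracks with the helpers above and below, for an acquire/release pair:
 *
 *    id = acquire_reference_state(env, insn_idx); // e.g. bpf_sk_lookup_tcp()
 *    regs[BPF_REG_0].ref_obj_id = id;             // tie R0 to the reference
 *    ...
 *    release_reference_state(cur_func(env), id);  // e.g. bpf_sk_release()
 *
 * An id still present at BPF_EXIT makes the program fail verification.
 */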

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
					    GFP_USER);
	if (!dst_state->jmp_history)
		return -ENOMEM;
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	___mark_reg_known(reg, imm);
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}
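
/* Editor's note (worked example): after __mark_reg_known(reg, 5) every
 * view of the value collapses onto the constant:
 *
 *    reg->var_off                  == tnum_const(5)  // value 5, mask 0
 *    reg->smin_value, smax_value   == 5
 *    reg->umin_value, umax_value   == 5
 *    and the four 32-bit bounds    == 5
 */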

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{
	switch (reg->type) {
	case PTR_TO_MAP_VALUE_OR_NULL: {
		const struct bpf_map *map = reg->map_ptr;

		if (map->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = map->inner_map_meta;
		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
			reg->type = PTR_TO_XDP_SOCK;
		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
			reg->type = PTR_TO_SOCKET;
		} else {
			reg->type = PTR_TO_MAP_VALUE;
		}
		break;
	}
	case PTR_TO_SOCKET_OR_NULL:
		reg->type = PTR_TO_SOCKET;
		break;
	case PTR_TO_SOCK_COMMON_OR_NULL:
		reg->type = PTR_TO_SOCK_COMMON;
		break;
	case PTR_TO_TCP_SOCK_OR_NULL:
		reg->type = PTR_TO_TCP_SOCK;
		break;
	case PTR_TO_BTF_ID_OR_NULL:
		reg->type = PTR_TO_BTF_ID;
		break;
	case PTR_TO_MEM_OR_NULL:
		reg->type = PTR_TO_MEM;
		break;
	case PTR_TO_RDONLY_BUF_OR_NULL:
		reg->type = PTR_TO_RDONLY_BUF;
		break;
	case PTR_TO_RDWR_BUF_OR_NULL:
		reg->type = PTR_TO_RDWR_BUF;
		break;
	default:
		WARN_ONCE(1, "unknown nullable register type");
	}
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
1192 */ 1193 return reg->type == which && 1194 reg->id == 0 && 1195 reg->off == 0 && 1196 tnum_equals_const(reg->var_off, 0); 1197 } 1198 1199 /* Reset the min/max bounds of a register */ 1200 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 1201 { 1202 reg->smin_value = S64_MIN; 1203 reg->smax_value = S64_MAX; 1204 reg->umin_value = 0; 1205 reg->umax_value = U64_MAX; 1206 1207 reg->s32_min_value = S32_MIN; 1208 reg->s32_max_value = S32_MAX; 1209 reg->u32_min_value = 0; 1210 reg->u32_max_value = U32_MAX; 1211 } 1212 1213 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 1214 { 1215 reg->smin_value = S64_MIN; 1216 reg->smax_value = S64_MAX; 1217 reg->umin_value = 0; 1218 reg->umax_value = U64_MAX; 1219 } 1220 1221 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 1222 { 1223 reg->s32_min_value = S32_MIN; 1224 reg->s32_max_value = S32_MAX; 1225 reg->u32_min_value = 0; 1226 reg->u32_max_value = U32_MAX; 1227 } 1228 1229 static void __update_reg32_bounds(struct bpf_reg_state *reg) 1230 { 1231 struct tnum var32_off = tnum_subreg(reg->var_off); 1232 1233 /* min signed is max(sign bit) | min(other bits) */ 1234 reg->s32_min_value = max_t(s32, reg->s32_min_value, 1235 var32_off.value | (var32_off.mask & S32_MIN)); 1236 /* max signed is min(sign bit) | max(other bits) */ 1237 reg->s32_max_value = min_t(s32, reg->s32_max_value, 1238 var32_off.value | (var32_off.mask & S32_MAX)); 1239 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 1240 reg->u32_max_value = min(reg->u32_max_value, 1241 (u32)(var32_off.value | var32_off.mask)); 1242 } 1243 1244 static void __update_reg64_bounds(struct bpf_reg_state *reg) 1245 { 1246 /* min signed is max(sign bit) | min(other bits) */ 1247 reg->smin_value = max_t(s64, reg->smin_value, 1248 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 1249 /* max signed is min(sign bit) | max(other bits) */ 1250 reg->smax_value = min_t(s64, reg->smax_value, 1251 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 1252 reg->umin_value = max(reg->umin_value, reg->var_off.value); 1253 reg->umax_value = min(reg->umax_value, 1254 reg->var_off.value | reg->var_off.mask); 1255 } 1256 1257 static void __update_reg_bounds(struct bpf_reg_state *reg) 1258 { 1259 __update_reg32_bounds(reg); 1260 __update_reg64_bounds(reg); 1261 } 1262 1263 /* Uses signed min/max values to inform unsigned, and vice-versa */ 1264 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 1265 { 1266 /* Learn sign from signed bounds. 1267 * If we cannot cross the sign boundary, then signed and unsigned bounds 1268 * are the same, so combine. This works even in the negative case, e.g. 1269 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 1270 */ 1271 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { 1272 reg->s32_min_value = reg->u32_min_value = 1273 max_t(u32, reg->s32_min_value, reg->u32_min_value); 1274 reg->s32_max_value = reg->u32_max_value = 1275 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1276 return; 1277 } 1278 /* Learn sign from unsigned bounds. Signed bounds cross the sign 1279 * boundary, so we must be careful. 1280 */ 1281 if ((s32)reg->u32_max_value >= 0) { 1282 /* Positive. We can't learn anything from the smin, but smax 1283 * is positive, hence safe. 1284 */ 1285 reg->s32_min_value = reg->u32_min_value; 1286 reg->s32_max_value = reg->u32_max_value = 1287 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1288 } else if ((s32)reg->u32_min_value < 0) { 1289 /* Negative. 

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;
	/* Attempt to pull 32-bit signed bounds into 64-bit bounds
	 * but must be positive otherwise set to worst case bounds
	 * and refine later from tnum.
	 */
	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
		reg->smax_value = reg->s32_max_value;
	else
		reg->smax_value = U32_MAX;
	if (reg->s32_min_value >= 0)
		reg->smin_value = reg->s32_min_value;
	else
		reg->smin_value = 0;
}

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence
	 * allowing us to use 32-bit bounds directly.
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
		 */
		__mark_reg64_unbounded(reg);
		__update_reg_bounds(reg);
	}

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}
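
/* Editor's sketch (illustration): the "upper 32 bits zeroed" special case
 * above typically comes from an explicit zero extension such as
 *
 *    BPF_MOV32_REG(BPF_REG_1, BPF_REG_2),     // w1 = w2, zero-extends
 *
 * or a <<32, >>32 shift pair, after which
 * tnum_equals_const(tnum_clear_subreg(var_off), 0) holds and the 32-bit
 * bounds can be copied straight into the 64-bit ones.
 */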
1384 */ 1385 __mark_reg64_unbounded(reg); 1386 __update_reg_bounds(reg); 1387 } 1388 1389 /* Intersecting with the old var_off might have improved our bounds 1390 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1391 * then new var_off is (0; 0x7f...fc) which improves our umax. 1392 */ 1393 __reg_deduce_bounds(reg); 1394 __reg_bound_offset(reg); 1395 __update_reg_bounds(reg); 1396 } 1397 1398 static bool __reg64_bound_s32(s64 a) 1399 { 1400 return a > S32_MIN && a < S32_MAX; 1401 } 1402 1403 static bool __reg64_bound_u32(u64 a) 1404 { 1405 return a > U32_MIN && a < U32_MAX; 1406 } 1407 1408 static void __reg_combine_64_into_32(struct bpf_reg_state *reg) 1409 { 1410 __mark_reg32_unbounded(reg); 1411 1412 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { 1413 reg->s32_min_value = (s32)reg->smin_value; 1414 reg->s32_max_value = (s32)reg->smax_value; 1415 } 1416 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) { 1417 reg->u32_min_value = (u32)reg->umin_value; 1418 reg->u32_max_value = (u32)reg->umax_value; 1419 } 1420 1421 /* Intersecting with the old var_off might have improved our bounds 1422 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1423 * then new var_off is (0; 0x7f...fc) which improves our umax. 1424 */ 1425 __reg_deduce_bounds(reg); 1426 __reg_bound_offset(reg); 1427 __update_reg_bounds(reg); 1428 } 1429 1430 /* Mark a register as having a completely unknown (scalar) value. */ 1431 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1432 struct bpf_reg_state *reg) 1433 { 1434 /* 1435 * Clear type, id, off, and union(map_ptr, range) and 1436 * padding between 'type' and union 1437 */ 1438 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 1439 reg->type = SCALAR_VALUE; 1440 reg->var_off = tnum_unknown; 1441 reg->frameno = 0; 1442 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; 1443 __mark_reg_unbounded(reg); 1444 } 1445 1446 static void mark_reg_unknown(struct bpf_verifier_env *env, 1447 struct bpf_reg_state *regs, u32 regno) 1448 { 1449 if (WARN_ON(regno >= MAX_BPF_REG)) { 1450 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 1451 /* Something bad happened, let's kill all regs except FP */ 1452 for (regno = 0; regno < BPF_REG_FP; regno++) 1453 __mark_reg_not_init(env, regs + regno); 1454 return; 1455 } 1456 __mark_reg_unknown(env, regs + regno); 1457 } 1458 1459 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 1460 struct bpf_reg_state *reg) 1461 { 1462 __mark_reg_unknown(env, reg); 1463 reg->type = NOT_INIT; 1464 } 1465 1466 static void mark_reg_not_init(struct bpf_verifier_env *env, 1467 struct bpf_reg_state *regs, u32 regno) 1468 { 1469 if (WARN_ON(regno >= MAX_BPF_REG)) { 1470 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 1471 /* Something bad happened, let's kill all regs except FP */ 1472 for (regno = 0; regno < BPF_REG_FP; regno++) 1473 __mark_reg_not_init(env, regs + regno); 1474 return; 1475 } 1476 __mark_reg_not_init(env, regs + regno); 1477 } 1478 1479 static void mark_btf_ld_reg(struct bpf_verifier_env *env, 1480 struct bpf_reg_state *regs, u32 regno, 1481 enum bpf_reg_type reg_type, 1482 struct btf *btf, u32 btf_id) 1483 { 1484 if (reg_type == SCALAR_VALUE) { 1485 mark_reg_unknown(env, regs, regno); 1486 return; 1487 } 1488 mark_reg_known_zero(env, regs, regno); 1489 regs[regno].type = PTR_TO_BTF_ID; 1490 regs[regno].btf = btf; 1491 regs[regno].btf_id = btf_id; 1492 } 1493 1494 #define DEF_NOT_SUBREG (0) 1495 

static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return ret;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	/* determine subprog starts. The end is one before the next starts */
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return env->subprog_cnt - 1;
}
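
/* Editor's note (illustration): subprog starts are kept sorted so that
 * find_subprog() can bsearch them. With starts {0, 10, 25}:
 *
 *    find_subprog(env, 10);   // returns index 1
 *    find_subprog(env, 11);   // returns -ENOENT, only exact starts match
 */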

struct bpf_kfunc_desc {
	struct btf_func_model func_model;
	u32 func_id;
	s32 imm;
};

#define MAX_KFUNC_DESCS 256
struct bpf_kfunc_desc_tab {
	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
	u32 nr_descs;
};

static int kfunc_desc_cmp_by_id(const void *a, const void *b)
{
	const struct bpf_kfunc_desc *d0 = a;
	const struct bpf_kfunc_desc *d1 = b;

	/* func_id is not greater than BTF_MAX_TYPE */
	return d0->func_id - d1->func_id;
}

static const struct bpf_kfunc_desc *
find_kfunc_desc(const struct bpf_prog *prog, u32 func_id)
{
	struct bpf_kfunc_desc desc = {
		.func_id = func_id,
	};
	struct bpf_kfunc_desc_tab *tab;

	tab = prog->aux->kfunc_tab;
	return bsearch(&desc, tab->descs, tab->nr_descs,
		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id);
}

static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
{
	const struct btf_type *func, *func_proto;
	struct bpf_kfunc_desc_tab *tab;
	struct bpf_prog_aux *prog_aux;
	struct bpf_kfunc_desc *desc;
	const char *func_name;
	unsigned long addr;
	int err;

	prog_aux = env->prog->aux;
	tab = prog_aux->kfunc_tab;
	if (!tab) {
		if (!btf_vmlinux) {
			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
			return -ENOTSUPP;
		}

		if (!env->prog->jit_requested) {
			verbose(env, "JIT is required for calling kernel function\n");
			return -ENOTSUPP;
		}

		if (!bpf_jit_supports_kfunc_call()) {
			verbose(env, "JIT does not support calling kernel function\n");
			return -ENOTSUPP;
		}

		if (!env->prog->gpl_compatible) {
			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
			return -EINVAL;
		}

		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
		if (!tab)
			return -ENOMEM;
		prog_aux->kfunc_tab = tab;
	}

	if (find_kfunc_desc(env->prog, func_id))
		return 0;

	if (tab->nr_descs == MAX_KFUNC_DESCS) {
		verbose(env, "too many different kernel function calls\n");
		return -E2BIG;
	}

	func = btf_type_by_id(btf_vmlinux, func_id);
	if (!func || !btf_type_is_func(func)) {
		verbose(env, "kernel btf_id %u is not a function\n",
			func_id);
		return -EINVAL;
	}
	func_proto = btf_type_by_id(btf_vmlinux, func->type);
	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
			func_id);
		return -EINVAL;
	}

	func_name = btf_name_by_offset(btf_vmlinux, func->name_off);
	addr = kallsyms_lookup_name(func_name);
	if (!addr) {
		verbose(env, "cannot find address for kernel function %s\n",
			func_name);
		return -EINVAL;
	}

	desc = &tab->descs[tab->nr_descs++];
	desc->func_id = func_id;
	desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base;
	err = btf_distill_func_proto(&env->log, btf_vmlinux,
				     func_proto, func_name,
				     &desc->func_model);
	if (!err)
		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
		     kfunc_desc_cmp_by_id, NULL);
	return err;
}
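
/* Editor's note (illustration): the kfunc table is kept sorted by func_id
 * while calls are being resolved, then re-sorted by imm (see
 * sort_kfunc_descs_by_imm() below) so the JIT can bsearch by the patched
 * insn->imm:
 *
 *    add_kfunc_call(env, btf_id);          // table sorted by func_id
 *    sort_kfunc_descs_by_imm(env->prog);   // once, before JITing
 *    bpf_jit_find_kfunc_model(prog, insn); // bsearch by insn->imm
 */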

static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
{
	const struct bpf_kfunc_desc *d0 = a;
	const struct bpf_kfunc_desc *d1 = b;

	if (d0->imm > d1->imm)
		return 1;
	else if (d0->imm < d1->imm)
		return -1;
	return 0;
}

static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
{
	struct bpf_kfunc_desc_tab *tab;

	tab = prog->aux->kfunc_tab;
	if (!tab)
		return;

	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
	     kfunc_desc_cmp_by_imm, NULL);
}

bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return !!prog->aux->kfunc_tab;
}

const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	const struct bpf_kfunc_desc desc = {
		.imm = insn->imm,
	};
	const struct bpf_kfunc_desc *res;
	struct bpf_kfunc_desc_tab *tab;

	tab = prog->aux->kfunc_tab;
	res = bsearch(&desc, tab->descs, tab->nr_descs,
		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);

	return res ? &res->func_model : NULL;
}

static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int i, ret, insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret)
		return ret;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
		    !bpf_pseudo_kfunc_call(insn))
			continue;

		if (!env->bpf_capable) {
			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
			return -EPERM;
		}

		if (bpf_pseudo_func(insn)) {
			ret = add_subprog(env, i + insn->imm + 1);
			if (ret >= 0)
				/* remember subprog */
				insn[1].imm = ret;
		} else if (bpf_pseudo_call(insn)) {
			ret = add_subprog(env, i + insn->imm + 1);
		} else {
			ret = add_kfunc_call(env, insn->imm);
		}

		if (ret < 0)
			return ret;
	}
1770 */ 1771 subprog[env->subprog_cnt].start = insn_cnt; 1772 1773 if (env->log.level & BPF_LOG_LEVEL2) 1774 for (i = 0; i < env->subprog_cnt; i++) 1775 verbose(env, "func#%d @%d\n", i, subprog[i].start); 1776 1777 return 0; 1778 } 1779 1780 static int check_subprogs(struct bpf_verifier_env *env) 1781 { 1782 int i, subprog_start, subprog_end, off, cur_subprog = 0; 1783 struct bpf_subprog_info *subprog = env->subprog_info; 1784 struct bpf_insn *insn = env->prog->insnsi; 1785 int insn_cnt = env->prog->len; 1786 1787 /* now check that all jumps are within the same subprog */ 1788 subprog_start = subprog[cur_subprog].start; 1789 subprog_end = subprog[cur_subprog + 1].start; 1790 for (i = 0; i < insn_cnt; i++) { 1791 u8 code = insn[i].code; 1792 1793 if (code == (BPF_JMP | BPF_CALL) && 1794 insn[i].imm == BPF_FUNC_tail_call && 1795 insn[i].src_reg != BPF_PSEUDO_CALL) 1796 subprog[cur_subprog].has_tail_call = true; 1797 if (BPF_CLASS(code) == BPF_LD && 1798 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) 1799 subprog[cur_subprog].has_ld_abs = true; 1800 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 1801 goto next; 1802 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 1803 goto next; 1804 off = i + insn[i].off + 1; 1805 if (off < subprog_start || off >= subprog_end) { 1806 verbose(env, "jump out of range from insn %d to %d\n", i, off); 1807 return -EINVAL; 1808 } 1809 next: 1810 if (i == subprog_end - 1) { 1811 /* to avoid fall-through from one subprog into another 1812 * the last insn of the subprog should be either exit 1813 * or unconditional jump back 1814 */ 1815 if (code != (BPF_JMP | BPF_EXIT) && 1816 code != (BPF_JMP | BPF_JA)) { 1817 verbose(env, "last insn is not an exit or jmp\n"); 1818 return -EINVAL; 1819 } 1820 subprog_start = subprog_end; 1821 cur_subprog++; 1822 if (cur_subprog < env->subprog_cnt) 1823 subprog_end = subprog[cur_subprog + 1].start; 1824 } 1825 } 1826 return 0; 1827 } 1828 1829 /* Parentage chain of this register (or stack slot) should take care of all 1830 * issues like callee-saved registers, stack slot allocation time, etc. 1831 */ 1832 static int mark_reg_read(struct bpf_verifier_env *env, 1833 const struct bpf_reg_state *state, 1834 struct bpf_reg_state *parent, u8 flag) 1835 { 1836 bool writes = parent == state->parent; /* Observe write marks */ 1837 int cnt = 0; 1838 1839 while (parent) { 1840 /* if read wasn't screened by an earlier write ... */ 1841 if (writes && state->live & REG_LIVE_WRITTEN) 1842 break; 1843 if (parent->live & REG_LIVE_DONE) { 1844 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 1845 reg_type_str[parent->type], 1846 parent->var_off.value, parent->off); 1847 return -EFAULT; 1848 } 1849 /* The first condition is more likely to be true than the 1850 * second, checked it first. 1851 */ 1852 if ((parent->live & REG_LIVE_READ) == flag || 1853 parent->live & REG_LIVE_READ64) 1854 /* The parentage chain never changes and 1855 * this parent was already marked as LIVE_READ. 1856 * There is no need to keep walking the chain again and 1857 * keep re-marking all parents as LIVE_READ. 1858 * This case happens when the same register is read 1859 * multiple times without writes into it in-between. 1860 * Also, if parent has the stronger REG_LIVE_READ64 set, 1861 * then no need to set the weak REG_LIVE_READ32. 1862 */ 1863 break; 1864 /* ... then we depend on parent's value */ 1865 parent->live |= flag; 1866 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. 
*/ 1867 if (flag == REG_LIVE_READ64) 1868 parent->live &= ~REG_LIVE_READ32; 1869 state = parent; 1870 parent = state->parent; 1871 writes = true; 1872 cnt++; 1873 } 1874 1875 if (env->longest_mark_read_walk < cnt) 1876 env->longest_mark_read_walk = cnt; 1877 return 0; 1878 } 1879 1880 /* This function is supposed to be used by the following 32-bit optimization 1881 * code only. It returns TRUE if the source or destination register operates 1882 * on 64-bit, otherwise return FALSE. 1883 */ 1884 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 1885 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 1886 { 1887 u8 code, class, op; 1888 1889 code = insn->code; 1890 class = BPF_CLASS(code); 1891 op = BPF_OP(code); 1892 if (class == BPF_JMP) { 1893 /* BPF_EXIT for "main" will reach here. Return TRUE 1894 * conservatively. 1895 */ 1896 if (op == BPF_EXIT) 1897 return true; 1898 if (op == BPF_CALL) { 1899 /* BPF to BPF call will reach here because of marking 1900 * caller saved clobber with DST_OP_NO_MARK for which we 1901 * don't care the register def because they are anyway 1902 * marked as NOT_INIT already. 1903 */ 1904 if (insn->src_reg == BPF_PSEUDO_CALL) 1905 return false; 1906 /* Helper call will reach here because of arg type 1907 * check, conservatively return TRUE. 1908 */ 1909 if (t == SRC_OP) 1910 return true; 1911 1912 return false; 1913 } 1914 } 1915 1916 if (class == BPF_ALU64 || class == BPF_JMP || 1917 /* BPF_END always use BPF_ALU class. */ 1918 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 1919 return true; 1920 1921 if (class == BPF_ALU || class == BPF_JMP32) 1922 return false; 1923 1924 if (class == BPF_LDX) { 1925 if (t != SRC_OP) 1926 return BPF_SIZE(code) == BPF_DW; 1927 /* LDX source must be ptr. */ 1928 return true; 1929 } 1930 1931 if (class == BPF_STX) { 1932 /* BPF_STX (including atomic variants) has multiple source 1933 * operands, one of which is a ptr. Check whether the caller is 1934 * asking about it. 1935 */ 1936 if (t == SRC_OP && reg->type != SCALAR_VALUE) 1937 return true; 1938 return BPF_SIZE(code) == BPF_DW; 1939 } 1940 1941 if (class == BPF_LD) { 1942 u8 mode = BPF_MODE(code); 1943 1944 /* LD_IMM64 */ 1945 if (mode == BPF_IMM) 1946 return true; 1947 1948 /* Both LD_IND and LD_ABS return 32-bit data. */ 1949 if (t != SRC_OP) 1950 return false; 1951 1952 /* Implicit ctx ptr. */ 1953 if (regno == BPF_REG_6) 1954 return true; 1955 1956 /* Explicit source could be any width. */ 1957 return true; 1958 } 1959 1960 if (class == BPF_ST) 1961 /* The only source register for BPF_ST is a ptr. */ 1962 return true; 1963 1964 /* Conservatively return true at default. */ 1965 return true; 1966 } 1967 1968 /* Return the regno defined by the insn, or -1. */ 1969 static int insn_def_regno(const struct bpf_insn *insn) 1970 { 1971 switch (BPF_CLASS(insn->code)) { 1972 case BPF_JMP: 1973 case BPF_JMP32: 1974 case BPF_ST: 1975 return -1; 1976 case BPF_STX: 1977 if (BPF_MODE(insn->code) == BPF_ATOMIC && 1978 (insn->imm & BPF_FETCH)) { 1979 if (insn->imm == BPF_CMPXCHG) 1980 return BPF_REG_0; 1981 else 1982 return insn->src_reg; 1983 } else { 1984 return -1; 1985 } 1986 default: 1987 return insn->dst_reg; 1988 } 1989 } 1990 1991 /* Return TRUE if INSN has defined any 32-bit value explicitly. 
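 * E.g. a BPF_ALU BPF_MOV writes only a 32-bit subregister and returns true
 * here, while its BPF_ALU64 counterpart defines all 64 bits and returns
 * false (editorial example; see is_reg64() above).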
 */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	int dst_reg = insn_def_regno(insn);

	if (dst_reg == -1)
		return false;

	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}

static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
{
	const struct btf_type *func;

	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
		return NULL;

	func = btf_type_by_id(btf_vmlinux, insn->imm);
	return btf_name_by_offset(btf_vmlinux, func->name_off);
}

/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn.
Its purpose is to compute a bitmask of registers and 2108 * stack slots that needs precision in the parent verifier state. 2109 */ 2110 static int backtrack_insn(struct bpf_verifier_env *env, int idx, 2111 u32 *reg_mask, u64 *stack_mask) 2112 { 2113 const struct bpf_insn_cbs cbs = { 2114 .cb_call = disasm_kfunc_name, 2115 .cb_print = verbose, 2116 .private_data = env, 2117 }; 2118 struct bpf_insn *insn = env->prog->insnsi + idx; 2119 u8 class = BPF_CLASS(insn->code); 2120 u8 opcode = BPF_OP(insn->code); 2121 u8 mode = BPF_MODE(insn->code); 2122 u32 dreg = 1u << insn->dst_reg; 2123 u32 sreg = 1u << insn->src_reg; 2124 u32 spi; 2125 2126 if (insn->code == 0) 2127 return 0; 2128 if (env->log.level & BPF_LOG_LEVEL) { 2129 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); 2130 verbose(env, "%d: ", idx); 2131 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 2132 } 2133 2134 if (class == BPF_ALU || class == BPF_ALU64) { 2135 if (!(*reg_mask & dreg)) 2136 return 0; 2137 if (opcode == BPF_MOV) { 2138 if (BPF_SRC(insn->code) == BPF_X) { 2139 /* dreg = sreg 2140 * dreg needs precision after this insn 2141 * sreg needs precision before this insn 2142 */ 2143 *reg_mask &= ~dreg; 2144 *reg_mask |= sreg; 2145 } else { 2146 /* dreg = K 2147 * dreg needs precision after this insn. 2148 * Corresponding register is already marked 2149 * as precise=true in this verifier state. 2150 * No further markings in parent are necessary 2151 */ 2152 *reg_mask &= ~dreg; 2153 } 2154 } else { 2155 if (BPF_SRC(insn->code) == BPF_X) { 2156 /* dreg += sreg 2157 * both dreg and sreg need precision 2158 * before this insn 2159 */ 2160 *reg_mask |= sreg; 2161 } /* else dreg += K 2162 * dreg still needs precision before this insn 2163 */ 2164 } 2165 } else if (class == BPF_LDX) { 2166 if (!(*reg_mask & dreg)) 2167 return 0; 2168 *reg_mask &= ~dreg; 2169 2170 /* scalars can only be spilled into stack w/o losing precision. 2171 * Load from any other memory can be zero extended. 2172 * The desire to keep that precision is already indicated 2173 * by 'precise' mark in corresponding register of this state. 2174 * No further tracking necessary. 2175 */ 2176 if (insn->src_reg != BPF_REG_FP) 2177 return 0; 2178 if (BPF_SIZE(insn->code) != BPF_DW) 2179 return 0; 2180 2181 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 2182 * that [fp - off] slot contains scalar that needs to be 2183 * tracked with precision 2184 */ 2185 spi = (-insn->off - 1) / BPF_REG_SIZE; 2186 if (spi >= 64) { 2187 verbose(env, "BUG spi %d\n", spi); 2188 WARN_ONCE(1, "verifier backtracking bug"); 2189 return -EFAULT; 2190 } 2191 *stack_mask |= 1ull << spi; 2192 } else if (class == BPF_STX || class == BPF_ST) { 2193 if (*reg_mask & dreg) 2194 /* stx & st shouldn't be using _scalar_ dst_reg 2195 * to access memory. It means backtracking 2196 * encountered a case of pointer subtraction. 
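			 * In that case the precision chain cannot be followed
			 * any further; returning -ENOTSUPP below makes the
			 * caller fall back to mark_all_scalars_precise() for
			 * the whole path (editorial note).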
			 */
			return -ENOTSUPP;
		/* scalars can only be spilled into stack */
		if (insn->dst_reg != BPF_REG_FP)
			return 0;
		if (BPF_SIZE(insn->code) != BPF_DW)
			return 0;
		spi = (-insn->off - 1) / BPF_REG_SIZE;
		if (spi >= 64) {
			verbose(env, "BUG spi %d\n", spi);
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}
		if (!(*stack_mask & (1ull << spi)))
			return 0;
		*stack_mask &= ~(1ull << spi);
		if (class == BPF_STX)
			*reg_mask |= sreg;
	} else if (class == BPF_JMP || class == BPF_JMP32) {
		if (opcode == BPF_CALL) {
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return -ENOTSUPP;
			/* regular helper call sets R0 */
			*reg_mask &= ~1;
			if (*reg_mask & 0x3f) {
				/* if backtracking was looking for registers R1-R5
				 * they should have been found already.
				 */
				verbose(env, "BUG regs %x\n", *reg_mask);
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		} else if (opcode == BPF_EXIT) {
			return -ENOTSUPP;
		}
	} else if (class == BPF_LD) {
		if (!(*reg_mask & dreg))
			return 0;
		*reg_mask &= ~dreg;
		/* It's ld_imm64 or ld_abs or ld_ind.
		 * For ld_imm64 no further tracking of precision
		 * into parent is necessary
		 */
		if (mode == BPF_IND || mode == BPF_ABS)
			/* to be analyzed */
			return -ENOTSUPP;
	}
	return 0;
}

/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   . ptr + scalar alu
 *   . if (scalar cond K|scalar)
 *   . helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that these scalar registers
 *   should be precise.
 * . during state pruning two registers (or spilled stack slots)
 *   are equivalent if both are not precise.
 *
 * Note the verifier cannot simply walk register parentage chain,
 * since many different registers and stack slots could have been
 * used to compute single precise scalar.
 *
 * The approach of starting with precise=true for all registers and then
 * backtrack to mark a register as not precise when the verifier detects
 * that program doesn't care about specific value (e.g., when helper
 * takes register as ARG_ANYTHING parameter) is not safe.
 *
 * It's ok to walk single parentage chain of the verifier states.
 * It's possible that this backtracking will go all the way till 1st insn.
 * All other branches will be explored for needing precision later.
 *
 * The backtracking needs to deal with cases like:
 *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
 *   r9 -= r8
 *   r5 = r9
 *   if r5 > 0x79f goto pc+7
 *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
 *   r5 += 1
 *   ...
 *   call bpf_perf_event_output#25
 *     where .arg5_type = ARG_CONST_SIZE_OR_ZERO
 *
 * and this case:
 *   r6 = 1
 *   call foo // uses callee's r6 inside to compute r0
 *   r0 += r6
 *   if r0 == 0 goto
 *
 * To track the above, reg_mask/stack_mask needs to be independent for
 * each frame.
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back into conservative marking.
 */
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
				     struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	/* big hammer: mark all scalars precise in this path.
	 * pop_stack may still get !precise scalars.
	 */
	for (; st; st = st->parent)
		for (i = 0; i <= st->curframe; i++) {
			func = st->frame[i];
			for (j = 0; j < BPF_REG_FP; j++) {
				reg = &func->regs[j];
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
				if (func->stack[j].slot_type[0] != STACK_SPILL)
					continue;
				reg = &func->stack[j].spilled_ptr;
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
		}
}

static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
				  int spi)
{
	struct bpf_verifier_state *st = env->cur_state;
	int first_idx = st->first_insn_idx;
	int last_idx = env->insn_idx;
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
	bool skip_first = true;
	bool new_marks = false;
	int i, err;

	if (!env->bpf_capable)
		return 0;

	func = st->frame[st->curframe];
	if (regno >= 0) {
		reg = &func->regs[regno];
		if (reg->type != SCALAR_VALUE) {
			WARN_ONCE(1, "backtracing misuse");
			return -EFAULT;
		}
		if (!reg->precise)
			new_marks = true;
		else
			reg_mask = 0;
		reg->precise = true;
	}

	while (spi >= 0) {
		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
			stack_mask = 0;
			break;
		}
		reg = &func->stack[spi].spilled_ptr;
		if (reg->type != SCALAR_VALUE) {
			stack_mask = 0;
			break;
		}
		if (!reg->precise)
			new_marks = true;
		else
			stack_mask = 0;
		reg->precise = true;
		break;
	}

	if (!new_marks)
		return 0;
	if (!reg_mask && !stack_mask)
		return 0;
	for (;;) {
		DECLARE_BITMAP(mask, 64);
		u32 history = st->jmp_history_cnt;

		if (env->log.level & BPF_LOG_LEVEL)
			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
		for (i = last_idx;;) {
			if (skip_first) {
				err = 0;
				skip_first = false;
			} else {
				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
			}
			if (err == -ENOTSUPP) {
				mark_all_scalars_precise(env, st);
				return 0;
			} else if (err) {
				return err;
			}
			if (!reg_mask && !stack_mask)
				/* Found assignment(s) into tracked register in this state.
				 * Since this state is already marked, just return.
				 * Nothing to be tracked further in the parent state.
				 */
				return 0;
			if (i == first_idx)
				break;
			i = get_prev_insn_idx(st, i, &history);
			if (i >= env->prog->len) {
				/* This can happen if backtracking reached insn 0
				 * and there are still reg_mask or stack_mask
				 * to backtrack.
2414 * It means the backtracking missed the spot where 2415 * particular register was initialized with a constant. 2416 */ 2417 verbose(env, "BUG backtracking idx %d\n", i); 2418 WARN_ONCE(1, "verifier backtracking bug"); 2419 return -EFAULT; 2420 } 2421 } 2422 st = st->parent; 2423 if (!st) 2424 break; 2425 2426 new_marks = false; 2427 func = st->frame[st->curframe]; 2428 bitmap_from_u64(mask, reg_mask); 2429 for_each_set_bit(i, mask, 32) { 2430 reg = &func->regs[i]; 2431 if (reg->type != SCALAR_VALUE) { 2432 reg_mask &= ~(1u << i); 2433 continue; 2434 } 2435 if (!reg->precise) 2436 new_marks = true; 2437 reg->precise = true; 2438 } 2439 2440 bitmap_from_u64(mask, stack_mask); 2441 for_each_set_bit(i, mask, 64) { 2442 if (i >= func->allocated_stack / BPF_REG_SIZE) { 2443 /* the sequence of instructions: 2444 * 2: (bf) r3 = r10 2445 * 3: (7b) *(u64 *)(r3 -8) = r0 2446 * 4: (79) r4 = *(u64 *)(r10 -8) 2447 * doesn't contain jmps. It's backtracked 2448 * as a single block. 2449 * During backtracking insn 3 is not recognized as 2450 * stack access, so at the end of backtracking 2451 * stack slot fp-8 is still marked in stack_mask. 2452 * However the parent state may not have accessed 2453 * fp-8 and it's "unallocated" stack space. 2454 * In such case fallback to conservative. 2455 */ 2456 mark_all_scalars_precise(env, st); 2457 return 0; 2458 } 2459 2460 if (func->stack[i].slot_type[0] != STACK_SPILL) { 2461 stack_mask &= ~(1ull << i); 2462 continue; 2463 } 2464 reg = &func->stack[i].spilled_ptr; 2465 if (reg->type != SCALAR_VALUE) { 2466 stack_mask &= ~(1ull << i); 2467 continue; 2468 } 2469 if (!reg->precise) 2470 new_marks = true; 2471 reg->precise = true; 2472 } 2473 if (env->log.level & BPF_LOG_LEVEL) { 2474 print_verifier_state(env, func); 2475 verbose(env, "parent %s regs=%x stack=%llx marks\n", 2476 new_marks ? "didn't have" : "already had", 2477 reg_mask, stack_mask); 2478 } 2479 2480 if (!reg_mask && !stack_mask) 2481 break; 2482 if (!new_marks) 2483 break; 2484 2485 last_idx = st->last_insn_idx; 2486 first_idx = st->first_insn_idx; 2487 } 2488 return 0; 2489 } 2490 2491 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) 2492 { 2493 return __mark_chain_precision(env, regno, -1); 2494 } 2495 2496 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 2497 { 2498 return __mark_chain_precision(env, -1, spi); 2499 } 2500 2501 static bool is_spillable_regtype(enum bpf_reg_type type) 2502 { 2503 switch (type) { 2504 case PTR_TO_MAP_VALUE: 2505 case PTR_TO_MAP_VALUE_OR_NULL: 2506 case PTR_TO_STACK: 2507 case PTR_TO_CTX: 2508 case PTR_TO_PACKET: 2509 case PTR_TO_PACKET_META: 2510 case PTR_TO_PACKET_END: 2511 case PTR_TO_FLOW_KEYS: 2512 case CONST_PTR_TO_MAP: 2513 case PTR_TO_SOCKET: 2514 case PTR_TO_SOCKET_OR_NULL: 2515 case PTR_TO_SOCK_COMMON: 2516 case PTR_TO_SOCK_COMMON_OR_NULL: 2517 case PTR_TO_TCP_SOCK: 2518 case PTR_TO_TCP_SOCK_OR_NULL: 2519 case PTR_TO_XDP_SOCK: 2520 case PTR_TO_BTF_ID: 2521 case PTR_TO_BTF_ID_OR_NULL: 2522 case PTR_TO_RDONLY_BUF: 2523 case PTR_TO_RDONLY_BUF_OR_NULL: 2524 case PTR_TO_RDWR_BUF: 2525 case PTR_TO_RDWR_BUF_OR_NULL: 2526 case PTR_TO_PERCPU_BTF_ID: 2527 case PTR_TO_MEM: 2528 case PTR_TO_MEM_OR_NULL: 2529 case PTR_TO_FUNC: 2530 case PTR_TO_MAP_KEY: 2531 return true; 2532 default: 2533 return false; 2534 } 2535 } 2536 2537 /* Does this register contain a constant zero? 
*/ 2538 static bool register_is_null(struct bpf_reg_state *reg) 2539 { 2540 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 2541 } 2542 2543 static bool register_is_const(struct bpf_reg_state *reg) 2544 { 2545 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 2546 } 2547 2548 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) 2549 { 2550 return tnum_is_unknown(reg->var_off) && 2551 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && 2552 reg->umin_value == 0 && reg->umax_value == U64_MAX && 2553 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && 2554 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; 2555 } 2556 2557 static bool register_is_bounded(struct bpf_reg_state *reg) 2558 { 2559 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); 2560 } 2561 2562 static bool __is_pointer_value(bool allow_ptr_leaks, 2563 const struct bpf_reg_state *reg) 2564 { 2565 if (allow_ptr_leaks) 2566 return false; 2567 2568 return reg->type != SCALAR_VALUE; 2569 } 2570 2571 static void save_register_state(struct bpf_func_state *state, 2572 int spi, struct bpf_reg_state *reg) 2573 { 2574 int i; 2575 2576 state->stack[spi].spilled_ptr = *reg; 2577 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2578 2579 for (i = 0; i < BPF_REG_SIZE; i++) 2580 state->stack[spi].slot_type[i] = STACK_SPILL; 2581 } 2582 2583 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, 2584 * stack boundary and alignment are checked in check_mem_access() 2585 */ 2586 static int check_stack_write_fixed_off(struct bpf_verifier_env *env, 2587 /* stack frame we're writing to */ 2588 struct bpf_func_state *state, 2589 int off, int size, int value_regno, 2590 int insn_idx) 2591 { 2592 struct bpf_func_state *cur; /* state of the current function */ 2593 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 2594 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; 2595 struct bpf_reg_state *reg = NULL; 2596 2597 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); 2598 if (err) 2599 return err; 2600 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 2601 * so it's aligned access and [off, off + size) are within stack limits 2602 */ 2603 if (!env->allow_ptr_leaks && 2604 state->stack[spi].slot_type[0] == STACK_SPILL && 2605 size != BPF_REG_SIZE) { 2606 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 2607 return -EACCES; 2608 } 2609 2610 cur = env->cur_state->frame[env->cur_state->curframe]; 2611 if (value_regno >= 0) 2612 reg = &cur->regs[value_regno]; 2613 2614 if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && 2615 !register_is_null(reg) && env->bpf_capable) { 2616 if (dst_reg != BPF_REG_FP) { 2617 /* The backtracking logic can only recognize explicit 2618 * stack slot address like [fp - 8]. Other spill of 2619 * scalar via different register has to be conservative. 2620 * Backtrack from here and mark all registers as precise 2621 * that contributed into 'reg' being a constant. 
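			 *
			 * An illustrative sketch (not from the original
			 * source) of such a spill through a scratch register:
			 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
			 *   BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
			 * stores to the same slot as *(u64 *)(fp - 8) would,
			 * but since dst_reg here is r1 rather than fp, the
			 * slot is not recognized and r2's chain is marked
			 * precise via mark_chain_precision() instead.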
2622 */ 2623 err = mark_chain_precision(env, value_regno); 2624 if (err) 2625 return err; 2626 } 2627 save_register_state(state, spi, reg); 2628 } else if (reg && is_spillable_regtype(reg->type)) { 2629 /* register containing pointer is being spilled into stack */ 2630 if (size != BPF_REG_SIZE) { 2631 verbose_linfo(env, insn_idx, "; "); 2632 verbose(env, "invalid size of register spill\n"); 2633 return -EACCES; 2634 } 2635 2636 if (state != cur && reg->type == PTR_TO_STACK) { 2637 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 2638 return -EINVAL; 2639 } 2640 2641 if (!env->bypass_spec_v4) { 2642 bool sanitize = false; 2643 2644 if (state->stack[spi].slot_type[0] == STACK_SPILL && 2645 register_is_const(&state->stack[spi].spilled_ptr)) 2646 sanitize = true; 2647 for (i = 0; i < BPF_REG_SIZE; i++) 2648 if (state->stack[spi].slot_type[i] == STACK_MISC) { 2649 sanitize = true; 2650 break; 2651 } 2652 if (sanitize) { 2653 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; 2654 int soff = (-spi - 1) * BPF_REG_SIZE; 2655 2656 /* detected reuse of integer stack slot with a pointer 2657 * which means either llvm is reusing stack slot or 2658 * an attacker is trying to exploit CVE-2018-3639 2659 * (speculative store bypass) 2660 * Have to sanitize that slot with preemptive 2661 * store of zero. 2662 */ 2663 if (*poff && *poff != soff) { 2664 /* disallow programs where single insn stores 2665 * into two different stack slots, since verifier 2666 * cannot sanitize them 2667 */ 2668 verbose(env, 2669 "insn %d cannot access two stack slots fp%d and fp%d", 2670 insn_idx, *poff, soff); 2671 return -EINVAL; 2672 } 2673 *poff = soff; 2674 } 2675 } 2676 save_register_state(state, spi, reg); 2677 } else { 2678 u8 type = STACK_MISC; 2679 2680 /* regular write of data into stack destroys any spilled ptr */ 2681 state->stack[spi].spilled_ptr.type = NOT_INIT; 2682 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 2683 if (state->stack[spi].slot_type[0] == STACK_SPILL) 2684 for (i = 0; i < BPF_REG_SIZE; i++) 2685 state->stack[spi].slot_type[i] = STACK_MISC; 2686 2687 /* only mark the slot as written if all 8 bytes were written 2688 * otherwise read propagation may incorrectly stop too soon 2689 * when stack slots are partially written. 2690 * This heuristic means that read propagation will be 2691 * conservative, since it will add reg_live_read marks 2692 * to stack slots all the way to first state when programs 2693 * writes+reads less than 8 bytes 2694 */ 2695 if (size == BPF_REG_SIZE) 2696 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2697 2698 /* when we zero initialize stack slots mark them as such */ 2699 if (reg && register_is_null(reg)) { 2700 /* backtracking doesn't work for STACK_ZERO yet. */ 2701 err = mark_chain_precision(env, value_regno); 2702 if (err) 2703 return err; 2704 type = STACK_ZERO; 2705 } 2706 2707 /* Mark slots affected by this stack write. */ 2708 for (i = 0; i < size; i++) 2709 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 2710 type; 2711 } 2712 return 0; 2713 } 2714 2715 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is 2716 * known to contain a variable offset. 2717 * This function checks whether the write is permitted and conservatively 2718 * tracks the effects of the write, considering that each stack slot in the 2719 * dynamic range is potentially written to. 2720 * 2721 * 'off' includes 'regno->off'. 
 * 'value_regno' can be -1, meaning that an unknown value is being written to
 * the stack.
 *
 * Spilled pointers in range are not marked as written because we don't know
 * what's going to be actually written. This means that read propagation for
 * future reads cannot be terminated by this write.
 *
 * For privileged programs, uninitialized stack slots are considered
 * initialized by this write (even though we don't know exactly what offsets
 * are going to be written to). The idea is that we don't want the verifier to
 * reject future reads that access slots written to through variable offsets.
 */
static int check_stack_write_var_off(struct bpf_verifier_env *env,
				     /* func where register points to */
				     struct bpf_func_state *state,
				     int ptr_regno, int off, int size,
				     int value_regno, int insn_idx)
{
	struct bpf_func_state *cur; /* state of the current function */
	int min_off, max_off;
	int i, err;
	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
	bool writing_zero = false;
	/* set if the fact that we're writing a zero is used to let any
	 * stack slots remain STACK_ZERO
	 */
	bool zero_used = false;

	cur = env->cur_state->frame[env->cur_state->curframe];
	ptr_reg = &cur->regs[ptr_regno];
	min_off = ptr_reg->smin_value + off;
	max_off = ptr_reg->smax_value + off + size;
	if (value_regno >= 0)
		value_reg = &cur->regs[value_regno];
	if (value_reg && register_is_null(value_reg))
		writing_zero = true;

	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
	if (err)
		return err;


	/* Variable offset writes destroy any spilled pointers in range. */
	for (i = min_off; i < max_off; i++) {
		u8 new_type, *stype;
		int slot, spi;

		slot = -i - 1;
		spi = slot / BPF_REG_SIZE;
		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];

		if (!env->allow_ptr_leaks
		    && *stype != NOT_INIT
		    && *stype != SCALAR_VALUE) {
			/* Reject the write if there are spilled pointers in
			 * range. If we didn't reject here, the ptr status
			 * would be erased below (even though not all slots are
			 * actually overwritten), possibly opening the door to
			 * leaks.
			 */
			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
				insn_idx, i);
			return -EINVAL;
		}

		/* Erase all spilled pointers. */
		state->stack[spi].spilled_ptr.type = NOT_INIT;

		/* Update the slot type. */
		new_type = STACK_MISC;
		if (writing_zero && *stype == STACK_ZERO) {
			new_type = STACK_ZERO;
			zero_used = true;
		}
		/* If the slot is STACK_INVALID, we check whether it's OK to
		 * pretend that it will be initialized by this write. The slot
		 * might not actually be written to, and so if we mark it as
		 * initialized future reads might leak uninitialized memory.
		 * For privileged programs, we will accept such reads to slots
		 * that may or may not be written because, if we were to
		 * reject them, the error would be too confusing.
		 */
		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
				insn_idx, i);
			return -EINVAL;
		}
		*stype = new_type;
	}
	if (zero_used) {
		/* backtracking doesn't work for STACK_ZERO yet.
*/ 2813 err = mark_chain_precision(env, value_regno); 2814 if (err) 2815 return err; 2816 } 2817 return 0; 2818 } 2819 2820 /* When register 'dst_regno' is assigned some values from stack[min_off, 2821 * max_off), we set the register's type according to the types of the 2822 * respective stack slots. If all the stack values are known to be zeros, then 2823 * so is the destination reg. Otherwise, the register is considered to be 2824 * SCALAR. This function does not deal with register filling; the caller must 2825 * ensure that all spilled registers in the stack range have been marked as 2826 * read. 2827 */ 2828 static void mark_reg_stack_read(struct bpf_verifier_env *env, 2829 /* func where src register points to */ 2830 struct bpf_func_state *ptr_state, 2831 int min_off, int max_off, int dst_regno) 2832 { 2833 struct bpf_verifier_state *vstate = env->cur_state; 2834 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2835 int i, slot, spi; 2836 u8 *stype; 2837 int zeros = 0; 2838 2839 for (i = min_off; i < max_off; i++) { 2840 slot = -i - 1; 2841 spi = slot / BPF_REG_SIZE; 2842 stype = ptr_state->stack[spi].slot_type; 2843 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) 2844 break; 2845 zeros++; 2846 } 2847 if (zeros == max_off - min_off) { 2848 /* any access_size read into register is zero extended, 2849 * so the whole register == const_zero 2850 */ 2851 __mark_reg_const_zero(&state->regs[dst_regno]); 2852 /* backtracking doesn't support STACK_ZERO yet, 2853 * so mark it precise here, so that later 2854 * backtracking can stop here. 2855 * Backtracking may not need this if this register 2856 * doesn't participate in pointer adjustment. 2857 * Forward propagation of precise flag is not 2858 * necessary either. This mark is only to stop 2859 * backtracking. Any register that contributed 2860 * to const 0 was marked precise before spill. 2861 */ 2862 state->regs[dst_regno].precise = true; 2863 } else { 2864 /* have read misc data from the stack */ 2865 mark_reg_unknown(env, state->regs, dst_regno); 2866 } 2867 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 2868 } 2869 2870 /* Read the stack at 'off' and put the results into the register indicated by 2871 * 'dst_regno'. It handles reg filling if the addressed stack slot is a 2872 * spilled reg. 2873 * 2874 * 'dst_regno' can be -1, meaning that the read value is not going to a 2875 * register. 2876 * 2877 * The access is assumed to be within the current stack bounds. 
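 *
 * A typical spill/fill pair, as an illustrative sketch:
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill r1 to fp-8
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), // fill fp-8 into r2
 * after which r2 inherits the full register state saved in spilled_ptr.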
 */
static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
				      /* func where src register points to */
				      struct bpf_func_state *reg_state,
				      int off, int size, int dst_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	struct bpf_reg_state *reg;
	u8 *stype;

	stype = reg_state->stack[spi].slot_type;
	reg = &reg_state->stack[spi].spilled_ptr;

	if (stype[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			if (reg->type != SCALAR_VALUE) {
				verbose_linfo(env, env->insn_idx, "; ");
				verbose(env, "invalid size of register fill\n");
				return -EACCES;
			}
			if (dst_regno >= 0) {
				mark_reg_unknown(env, state->regs, dst_regno);
				state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
			}
			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
			return 0;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
				verbose(env, "corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (dst_regno >= 0) {
			/* restore register state from stack */
			state->regs[dst_regno] = *reg;
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
			/* If dst_regno==-1, the caller is asking us whether
			 * it is acceptable to use this value as a SCALAR_VALUE
			 * (e.g. for XADD).
			 * We must not allow unprivileged callers to do that
			 * with spilled pointers.
			 */
			verbose(env, "leaking pointer from stack off %d\n",
				off);
			return -EACCES;
		}
		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
	} else {
		u8 type;

		for (i = 0; i < size; i++) {
			type = stype[(slot - i) % BPF_REG_SIZE];
			if (type == STACK_MISC)
				continue;
			if (type == STACK_ZERO)
				continue;
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
		if (dst_regno >= 0)
			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
	}
	return 0;
}

enum stack_access_src {
	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
	ACCESS_HELPER = 2,  /* the access is performed by a helper */
};

static int check_stack_range_initialized(struct bpf_verifier_env *env,
					 int regno, int off, int access_size,
					 bool zero_size_allowed,
					 enum stack_access_src type,
					 struct bpf_call_arg_meta *meta);

static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
	return cur_regs(env) + regno;
}

/* Read the stack at 'ptr_regno + off' and put the result into the register
 * 'dst_regno'.
 * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
 * but not its variable offset.
 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
 *
 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
 * filling registers (i.e. reads of spilled register cannot be detected when
 * the offset is not fixed).
We conservatively mark 'dst_regno' as containing 2979 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable 2980 * offset; for a fixed offset check_stack_read_fixed_off should be used 2981 * instead. 2982 */ 2983 static int check_stack_read_var_off(struct bpf_verifier_env *env, 2984 int ptr_regno, int off, int size, int dst_regno) 2985 { 2986 /* The state of the source register. */ 2987 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 2988 struct bpf_func_state *ptr_state = func(env, reg); 2989 int err; 2990 int min_off, max_off; 2991 2992 /* Note that we pass a NULL meta, so raw access will not be permitted. 2993 */ 2994 err = check_stack_range_initialized(env, ptr_regno, off, size, 2995 false, ACCESS_DIRECT, NULL); 2996 if (err) 2997 return err; 2998 2999 min_off = reg->smin_value + off; 3000 max_off = reg->smax_value + off; 3001 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); 3002 return 0; 3003 } 3004 3005 /* check_stack_read dispatches to check_stack_read_fixed_off or 3006 * check_stack_read_var_off. 3007 * 3008 * The caller must ensure that the offset falls within the allocated stack 3009 * bounds. 3010 * 3011 * 'dst_regno' is a register which will receive the value from the stack. It 3012 * can be -1, meaning that the read value is not going to a register. 3013 */ 3014 static int check_stack_read(struct bpf_verifier_env *env, 3015 int ptr_regno, int off, int size, 3016 int dst_regno) 3017 { 3018 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3019 struct bpf_func_state *state = func(env, reg); 3020 int err; 3021 /* Some accesses are only permitted with a static offset. */ 3022 bool var_off = !tnum_is_const(reg->var_off); 3023 3024 /* The offset is required to be static when reads don't go to a 3025 * register, in order to not leak pointers (see 3026 * check_stack_read_fixed_off). 3027 */ 3028 if (dst_regno < 0 && var_off) { 3029 char tn_buf[48]; 3030 3031 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3032 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n", 3033 tn_buf, off, size); 3034 return -EACCES; 3035 } 3036 /* Variable offset is prohibited for unprivileged mode for simplicity 3037 * since it requires corresponding support in Spectre masking for stack 3038 * ALU. See also retrieve_ptr_limit(). 3039 */ 3040 if (!env->bypass_spec_v1 && var_off) { 3041 char tn_buf[48]; 3042 3043 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3044 verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n", 3045 ptr_regno, tn_buf); 3046 return -EACCES; 3047 } 3048 3049 if (!var_off) { 3050 off += reg->var_off.value; 3051 err = check_stack_read_fixed_off(env, state, off, size, 3052 dst_regno); 3053 } else { 3054 /* Variable offset stack reads need more conservative handling 3055 * than fixed offset ones. Note that dst_regno >= 0 on this 3056 * branch. 3057 */ 3058 err = check_stack_read_var_off(env, ptr_regno, off, size, 3059 dst_regno); 3060 } 3061 return err; 3062 } 3063 3064 3065 /* check_stack_write dispatches to check_stack_write_fixed_off or 3066 * check_stack_write_var_off. 3067 * 3068 * 'ptr_regno' is the register used as a pointer into the stack. 3069 * 'off' includes 'ptr_regno->off', but not its variable offset (if any). 3070 * 'value_regno' is the register whose value we're writing to the stack. It can 3071 * be -1, meaning that we're not writing from a register. 
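 *
 * For example (hypothetical): *(u64 *)(r10 - 8) = r1 has a constant
 * var_off and takes the check_stack_write_fixed_off() path, while
 * r2 = r10; r2 += r3; *(u64 *)(r2 + 0) = r1 with an unknown but bounded
 * r3 takes the conservative check_stack_write_var_off() path.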
3072 * 3073 * The caller must ensure that the offset falls within the maximum stack size. 3074 */ 3075 static int check_stack_write(struct bpf_verifier_env *env, 3076 int ptr_regno, int off, int size, 3077 int value_regno, int insn_idx) 3078 { 3079 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3080 struct bpf_func_state *state = func(env, reg); 3081 int err; 3082 3083 if (tnum_is_const(reg->var_off)) { 3084 off += reg->var_off.value; 3085 err = check_stack_write_fixed_off(env, state, off, size, 3086 value_regno, insn_idx); 3087 } else { 3088 /* Variable offset stack reads need more conservative handling 3089 * than fixed offset ones. 3090 */ 3091 err = check_stack_write_var_off(env, state, 3092 ptr_regno, off, size, 3093 value_regno, insn_idx); 3094 } 3095 return err; 3096 } 3097 3098 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 3099 int off, int size, enum bpf_access_type type) 3100 { 3101 struct bpf_reg_state *regs = cur_regs(env); 3102 struct bpf_map *map = regs[regno].map_ptr; 3103 u32 cap = bpf_map_flags_to_cap(map); 3104 3105 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 3106 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 3107 map->value_size, off, size); 3108 return -EACCES; 3109 } 3110 3111 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 3112 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 3113 map->value_size, off, size); 3114 return -EACCES; 3115 } 3116 3117 return 0; 3118 } 3119 3120 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 3121 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 3122 int off, int size, u32 mem_size, 3123 bool zero_size_allowed) 3124 { 3125 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 3126 struct bpf_reg_state *reg; 3127 3128 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 3129 return 0; 3130 3131 reg = &cur_regs(env)[regno]; 3132 switch (reg->type) { 3133 case PTR_TO_MAP_KEY: 3134 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", 3135 mem_size, off, size); 3136 break; 3137 case PTR_TO_MAP_VALUE: 3138 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 3139 mem_size, off, size); 3140 break; 3141 case PTR_TO_PACKET: 3142 case PTR_TO_PACKET_META: 3143 case PTR_TO_PACKET_END: 3144 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 3145 off, size, regno, reg->id, off, mem_size); 3146 break; 3147 case PTR_TO_MEM: 3148 default: 3149 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 3150 mem_size, off, size); 3151 } 3152 3153 return -EACCES; 3154 } 3155 3156 /* check read/write into a memory region with possible variable offset */ 3157 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 3158 int off, int size, u32 mem_size, 3159 bool zero_size_allowed) 3160 { 3161 struct bpf_verifier_state *vstate = env->cur_state; 3162 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3163 struct bpf_reg_state *reg = &state->regs[regno]; 3164 int err; 3165 3166 /* We may have adjusted the register pointing to memory region, so we 3167 * need to try adding each of min_value and max_value to off 3168 * to make sure our theoretical access will be safe. 3169 */ 3170 if (env->log.level & BPF_LOG_LEVEL) 3171 print_verifier_state(env, state); 3172 3173 /* The minimum value is only important with signed 3174 * comparisons where we can't assume the floor of a 3175 * value is 0. 
If we are using signed variables for our 3176 * index'es we need to make sure that whatever we use 3177 * will have a set floor within our range. 3178 */ 3179 if (reg->smin_value < 0 && 3180 (reg->smin_value == S64_MIN || 3181 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 3182 reg->smin_value + off < 0)) { 3183 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 3184 regno); 3185 return -EACCES; 3186 } 3187 err = __check_mem_access(env, regno, reg->smin_value + off, size, 3188 mem_size, zero_size_allowed); 3189 if (err) { 3190 verbose(env, "R%d min value is outside of the allowed memory range\n", 3191 regno); 3192 return err; 3193 } 3194 3195 /* If we haven't set a max value then we need to bail since we can't be 3196 * sure we won't do bad things. 3197 * If reg->umax_value + off could overflow, treat that as unbounded too. 3198 */ 3199 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 3200 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 3201 regno); 3202 return -EACCES; 3203 } 3204 err = __check_mem_access(env, regno, reg->umax_value + off, size, 3205 mem_size, zero_size_allowed); 3206 if (err) { 3207 verbose(env, "R%d max value is outside of the allowed memory range\n", 3208 regno); 3209 return err; 3210 } 3211 3212 return 0; 3213 } 3214 3215 /* check read/write into a map element with possible variable offset */ 3216 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 3217 int off, int size, bool zero_size_allowed) 3218 { 3219 struct bpf_verifier_state *vstate = env->cur_state; 3220 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3221 struct bpf_reg_state *reg = &state->regs[regno]; 3222 struct bpf_map *map = reg->map_ptr; 3223 int err; 3224 3225 err = check_mem_region_access(env, regno, off, size, map->value_size, 3226 zero_size_allowed); 3227 if (err) 3228 return err; 3229 3230 if (map_value_has_spin_lock(map)) { 3231 u32 lock = map->spin_lock_off; 3232 3233 /* if any part of struct bpf_spin_lock can be touched by 3234 * load/store reject this program. 3235 * To check that [x1, x2) overlaps with [y1, y2) 3236 * it is sufficient to check x1 < y2 && y1 < x2. 3237 */ 3238 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 3239 lock < reg->umax_value + off + size) { 3240 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 3241 return -EACCES; 3242 } 3243 } 3244 return err; 3245 } 3246 3247 #define MAX_PACKET_OFF 0xffff 3248 3249 static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) 3250 { 3251 return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; 3252 } 3253 3254 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 3255 const struct bpf_call_arg_meta *meta, 3256 enum bpf_access_type t) 3257 { 3258 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 3259 3260 switch (prog_type) { 3261 /* Program types only with direct read access go here! */ 3262 case BPF_PROG_TYPE_LWT_IN: 3263 case BPF_PROG_TYPE_LWT_OUT: 3264 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 3265 case BPF_PROG_TYPE_SK_REUSEPORT: 3266 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3267 case BPF_PROG_TYPE_CGROUP_SKB: 3268 if (t == BPF_WRITE) 3269 return false; 3270 fallthrough; 3271 3272 /* Program types with direct read + write access go here! 
	 */
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;

	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		if (t == BPF_WRITE)
			env->seen_direct_write = true;

		return true;

	default:
		return false;
	}
}

static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
			       int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	int err;

	/* We may have added a variable offset to the packet pointer; but any
	 * reg->range we have comes after that. We are only checking the fixed
	 * offset.
	 */

	/* We don't allow negative numbers, because we aren't tracking enough
	 * detail to prove they're safe.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}

	err = reg->range < 0 ? -EINVAL :
	      __check_mem_access(env, regno, off, size, reg->range,
				 zero_size_allowed);
	if (err) {
		verbose(env, "R%d offset is outside of the packet\n", regno);
		return err;
	}

	/* __check_mem_access has made sure "off + size - 1" is within u16.
	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
	 * otherwise find_good_pkt_pointers would have refused to set range info
	 * and __check_mem_access would have rejected this pkt access.
	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
	 */
	env->prog->aux->max_pkt_offset =
		max_t(u32, env->prog->aux->max_pkt_offset,
		      off + reg->umax_value + size - 1);

	return err;
}

/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
			    struct btf **btf, u32 *btf_id)
{
	struct bpf_insn_access_aux info = {
		.reg_type = *reg_type,
		.log = &env->log,
	};

	if (env->ops->is_valid_access &&
	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
		/* A non zero info.ctx_field_size indicates that this field is a
		 * candidate for later verifier transformation to load the whole
		 * field and then apply a mask when accessed with a narrower
		 * access than actual ctx access size. A zero info.ctx_field_size
		 * will only allow for whole field access and rejects any other
		 * type of narrower access.
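		 *
		 * E.g. (editorial sketch) a 1-byte read of a 4-byte ctx field
		 * can be accepted here with info.ctx_field_size = 4 and later
		 * rewritten by the verifier into a 4-byte load plus
		 * shift/mask.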
		 */
		*reg_type = info.reg_type;

		if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
			*btf = info.btf;
			*btf_id = info.btf_id;
		} else {
			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
		}
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
				  int size)
{
	if (size < 0 || off < 0 ||
	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
		verbose(env, "invalid access to flow keys off=%d size=%d\n",
			off, size);
		return -EACCES;
	}
	return 0;
}

static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
			     u32 regno, int off, int size,
			     enum bpf_access_type t)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	struct bpf_insn_access_aux info = {};
	bool valid;

	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}

	switch (reg->type) {
	case PTR_TO_SOCK_COMMON:
		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
		break;
	case PTR_TO_SOCKET:
		valid = bpf_sock_is_valid_access(off, size, t, &info);
		break;
	case PTR_TO_TCP_SOCK:
		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
		break;
	case PTR_TO_XDP_SOCK:
		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
		break;
	default:
		valid = false;
	}


	if (valid) {
		env->insn_aux_data[insn_idx].ctx_field_size =
			info.ctx_field_size;
		return 0;
	}

	verbose(env, "R%d invalid %s access off=%d size=%d\n",
		regno, reg_type_str[reg->type], off, size);

	return -EACCES;
}

static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_CTX;
}

static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_sk_pointer(reg->type);
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_pkt_pointer(reg->type);
}

static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
	return reg->type == PTR_TO_FLOW_KEYS;
}

static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg,
				   int off, int size, bool strict)
{
	struct tnum reg_off;
	int ip_align;

	/* Byte size accesses are always allowed.
*/ 3474 if (!strict || size == 1) 3475 return 0; 3476 3477 /* For platforms that do not have a Kconfig enabling 3478 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 3479 * NET_IP_ALIGN is universally set to '2'. And on platforms 3480 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 3481 * to this code only in strict mode where we want to emulate 3482 * the NET_IP_ALIGN==2 checking. Therefore use an 3483 * unconditional IP align value of '2'. 3484 */ 3485 ip_align = 2; 3486 3487 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 3488 if (!tnum_is_aligned(reg_off, size)) { 3489 char tn_buf[48]; 3490 3491 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3492 verbose(env, 3493 "misaligned packet access off %d+%s+%d+%d size %d\n", 3494 ip_align, tn_buf, reg->off, off, size); 3495 return -EACCES; 3496 } 3497 3498 return 0; 3499 } 3500 3501 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 3502 const struct bpf_reg_state *reg, 3503 const char *pointer_desc, 3504 int off, int size, bool strict) 3505 { 3506 struct tnum reg_off; 3507 3508 /* Byte size accesses are always allowed. */ 3509 if (!strict || size == 1) 3510 return 0; 3511 3512 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 3513 if (!tnum_is_aligned(reg_off, size)) { 3514 char tn_buf[48]; 3515 3516 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3517 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 3518 pointer_desc, tn_buf, reg->off, off, size); 3519 return -EACCES; 3520 } 3521 3522 return 0; 3523 } 3524 3525 static int check_ptr_alignment(struct bpf_verifier_env *env, 3526 const struct bpf_reg_state *reg, int off, 3527 int size, bool strict_alignment_once) 3528 { 3529 bool strict = env->strict_alignment || strict_alignment_once; 3530 const char *pointer_desc = ""; 3531 3532 switch (reg->type) { 3533 case PTR_TO_PACKET: 3534 case PTR_TO_PACKET_META: 3535 /* Special case, because of NET_IP_ALIGN. Given metadata sits 3536 * right in front, treat it the very same way. 3537 */ 3538 return check_pkt_ptr_alignment(env, reg, off, size, strict); 3539 case PTR_TO_FLOW_KEYS: 3540 pointer_desc = "flow keys "; 3541 break; 3542 case PTR_TO_MAP_KEY: 3543 pointer_desc = "key "; 3544 break; 3545 case PTR_TO_MAP_VALUE: 3546 pointer_desc = "value "; 3547 break; 3548 case PTR_TO_CTX: 3549 pointer_desc = "context "; 3550 break; 3551 case PTR_TO_STACK: 3552 pointer_desc = "stack "; 3553 /* The stack spill tracking logic in check_stack_write_fixed_off() 3554 * and check_stack_read_fixed_off() relies on stack accesses being 3555 * aligned. 
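		 *
		 * E.g. an 8-byte spill must land on an 8-byte-aligned fp
		 * offset; an unaligned spill would straddle two stack slots
		 * and confuse the slot_type[] bookkeeping, which is why
		 * strict mode is forced on for PTR_TO_STACK below regardless
		 * of the global strict_alignment setting (editorial note).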
3556 */ 3557 strict = true; 3558 break; 3559 case PTR_TO_SOCKET: 3560 pointer_desc = "sock "; 3561 break; 3562 case PTR_TO_SOCK_COMMON: 3563 pointer_desc = "sock_common "; 3564 break; 3565 case PTR_TO_TCP_SOCK: 3566 pointer_desc = "tcp_sock "; 3567 break; 3568 case PTR_TO_XDP_SOCK: 3569 pointer_desc = "xdp_sock "; 3570 break; 3571 default: 3572 break; 3573 } 3574 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 3575 strict); 3576 } 3577 3578 static int update_stack_depth(struct bpf_verifier_env *env, 3579 const struct bpf_func_state *func, 3580 int off) 3581 { 3582 u16 stack = env->subprog_info[func->subprogno].stack_depth; 3583 3584 if (stack >= -off) 3585 return 0; 3586 3587 /* update known max for given subprogram */ 3588 env->subprog_info[func->subprogno].stack_depth = -off; 3589 return 0; 3590 } 3591 3592 /* starting from main bpf function walk all instructions of the function 3593 * and recursively walk all callees that given function can call. 3594 * Ignore jump and exit insns. 3595 * Since recursion is prevented by check_cfg() this algorithm 3596 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 3597 */ 3598 static int check_max_stack_depth(struct bpf_verifier_env *env) 3599 { 3600 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 3601 struct bpf_subprog_info *subprog = env->subprog_info; 3602 struct bpf_insn *insn = env->prog->insnsi; 3603 bool tail_call_reachable = false; 3604 int ret_insn[MAX_CALL_FRAMES]; 3605 int ret_prog[MAX_CALL_FRAMES]; 3606 int j; 3607 3608 process_func: 3609 /* protect against potential stack overflow that might happen when 3610 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 3611 * depth for such case down to 256 so that the worst case scenario 3612 * would result in 8k stack size (32 which is tailcall limit * 256 = 3613 * 8k). 3614 * 3615 * To get the idea what might happen, see an example: 3616 * func1 -> sub rsp, 128 3617 * subfunc1 -> sub rsp, 256 3618 * tailcall1 -> add rsp, 256 3619 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 3620 * subfunc2 -> sub rsp, 64 3621 * subfunc22 -> sub rsp, 128 3622 * tailcall2 -> add rsp, 128 3623 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 3624 * 3625 * tailcall will unwind the current stack frame but it will not get rid 3626 * of caller's stack as shown on the example above. 3627 */ 3628 if (idx && subprog[idx].has_tail_call && depth >= 256) { 3629 verbose(env, 3630 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 3631 depth); 3632 return -EACCES; 3633 } 3634 /* round up to 32-bytes, since this is granularity 3635 * of interpreter stack size 3636 */ 3637 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3638 if (depth > MAX_BPF_STACK) { 3639 verbose(env, "combined stack size of %d calls is %d. Too large\n", 3640 frame + 1, depth); 3641 return -EACCES; 3642 } 3643 continue_func: 3644 subprog_end = subprog[idx + 1].start; 3645 for (; i < subprog_end; i++) { 3646 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) 3647 continue; 3648 /* remember insn and function to return to */ 3649 ret_insn[frame] = i + 1; 3650 ret_prog[frame] = idx; 3651 3652 /* find the callee */ 3653 i = i + insn[i].imm + 1; 3654 idx = find_subprog(env, i); 3655 if (idx < 0) { 3656 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", 3657 i); 3658 return -EFAULT; 3659 } 3660 3661 if (subprog[idx].has_tail_call) 3662 tail_call_reachable = true; 3663 3664 frame++; 3665 if (frame >= MAX_CALL_FRAMES) { 3666 verbose(env, "the call stack of %d frames is too deep !\n", 3667 frame); 3668 return -E2BIG; 3669 } 3670 goto process_func; 3671 } 3672 /* if tail call got detected across bpf2bpf calls then mark each of the 3673 * currently present subprog frames as tail call reachable subprogs; 3674 * this info will be utilized by JIT so that we will be preserving the 3675 * tail call counter throughout bpf2bpf calls combined with tailcalls 3676 */ 3677 if (tail_call_reachable) 3678 for (j = 0; j < frame; j++) 3679 subprog[ret_prog[j]].tail_call_reachable = true; 3680 if (subprog[0].tail_call_reachable) 3681 env->prog->aux->tail_call_reachable = true; 3682 3683 /* end of for() loop means the last insn of the 'subprog' 3684 * was reached. Doesn't matter whether it was JA or EXIT 3685 */ 3686 if (frame == 0) 3687 return 0; 3688 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3689 frame--; 3690 i = ret_insn[frame]; 3691 idx = ret_prog[frame]; 3692 goto continue_func; 3693 } 3694 3695 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 3696 static int get_callee_stack_depth(struct bpf_verifier_env *env, 3697 const struct bpf_insn *insn, int idx) 3698 { 3699 int start = idx + insn->imm + 1, subprog; 3700 3701 subprog = find_subprog(env, start); 3702 if (subprog < 0) { 3703 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 3704 start); 3705 return -EFAULT; 3706 } 3707 return env->subprog_info[subprog].stack_depth; 3708 } 3709 #endif 3710 3711 int check_ctx_reg(struct bpf_verifier_env *env, 3712 const struct bpf_reg_state *reg, int regno) 3713 { 3714 /* Access to ctx or passing it to a helper is only allowed in 3715 * its original, unmodified form. 
3716 */ 3717 3718 if (reg->off) { 3719 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", 3720 regno, reg->off); 3721 return -EACCES; 3722 } 3723 3724 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3725 char tn_buf[48]; 3726 3727 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3728 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); 3729 return -EACCES; 3730 } 3731 3732 return 0; 3733 } 3734 3735 static int __check_buffer_access(struct bpf_verifier_env *env, 3736 const char *buf_info, 3737 const struct bpf_reg_state *reg, 3738 int regno, int off, int size) 3739 { 3740 if (off < 0) { 3741 verbose(env, 3742 "R%d invalid %s buffer access: off=%d, size=%d\n", 3743 regno, buf_info, off, size); 3744 return -EACCES; 3745 } 3746 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3747 char tn_buf[48]; 3748 3749 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3750 verbose(env, 3751 "R%d invalid variable buffer offset: off=%d, var_off=%s\n", 3752 regno, off, tn_buf); 3753 return -EACCES; 3754 } 3755 3756 return 0; 3757 } 3758 3759 static int check_tp_buffer_access(struct bpf_verifier_env *env, 3760 const struct bpf_reg_state *reg, 3761 int regno, int off, int size) 3762 { 3763 int err; 3764 3765 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); 3766 if (err) 3767 return err; 3768 3769 if (off + size > env->prog->aux->max_tp_access) 3770 env->prog->aux->max_tp_access = off + size; 3771 3772 return 0; 3773 } 3774 3775 static int check_buffer_access(struct bpf_verifier_env *env, 3776 const struct bpf_reg_state *reg, 3777 int regno, int off, int size, 3778 bool zero_size_allowed, 3779 const char *buf_info, 3780 u32 *max_access) 3781 { 3782 int err; 3783 3784 err = __check_buffer_access(env, buf_info, reg, regno, off, size); 3785 if (err) 3786 return err; 3787 3788 if (off + size > *max_access) 3789 *max_access = off + size; 3790 3791 return 0; 3792 } 3793 3794 /* BPF architecture zero extends alu32 ops into 64-bit registers */ 3795 static void zext_32_to_64(struct bpf_reg_state *reg) 3796 { 3797 reg->var_off = tnum_subreg(reg->var_off); 3798 __reg_assign_32_into_64(reg); 3799 } 3800 3801 /* truncate register to smaller size (in bytes) 3802 * must be called with size < BPF_REG_SIZE 3803 */ 3804 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 3805 { 3806 u64 mask; 3807 3808 /* clear high bits in bit representation */ 3809 reg->var_off = tnum_cast(reg->var_off, size); 3810 3811 /* fix arithmetic bounds */ 3812 mask = ((u64)1 << (size * 8)) - 1; 3813 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 3814 reg->umin_value &= mask; 3815 reg->umax_value &= mask; 3816 } else { 3817 reg->umin_value = 0; 3818 reg->umax_value = mask; 3819 } 3820 reg->smin_value = reg->umin_value; 3821 reg->smax_value = reg->umax_value; 3822 3823 /* If the size is smaller than the 32-bit register, the 32-bit values 3824 * are also truncated, so we push the 64-bit bounds into the 32-bit 3825 * bounds. The bounds above were already truncated to < 32 bits.
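 * * Worked example (a sketch, assuming size == 2): mask = 0xffff. A register with bounds [0x0, 0x1ffff] differs in the high bits at the two ends, so its bounds collapse to [0x0, 0xffff]; a register with bounds [0x10002, 0x10005] shares its high bits, so it keeps the masked low bits and becomes [0x2, 0x5].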
3826 */ 3827 if (size >= 4) 3828 return; 3829 __reg_combine_64_into_32(reg); 3830 } 3831 3832 static bool bpf_map_is_rdonly(const struct bpf_map *map) 3833 { 3834 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; 3835 } 3836 3837 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 3838 { 3839 void *ptr; 3840 u64 addr; 3841 int err; 3842 3843 err = map->ops->map_direct_value_addr(map, &addr, off); 3844 if (err) 3845 return err; 3846 ptr = (void *)(long)addr + off; 3847 3848 switch (size) { 3849 case sizeof(u8): 3850 *val = (u64)*(u8 *)ptr; 3851 break; 3852 case sizeof(u16): 3853 *val = (u64)*(u16 *)ptr; 3854 break; 3855 case sizeof(u32): 3856 *val = (u64)*(u32 *)ptr; 3857 break; 3858 case sizeof(u64): 3859 *val = *(u64 *)ptr; 3860 break; 3861 default: 3862 return -EINVAL; 3863 } 3864 return 0; 3865 } 3866 3867 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 3868 struct bpf_reg_state *regs, 3869 int regno, int off, int size, 3870 enum bpf_access_type atype, 3871 int value_regno) 3872 { 3873 struct bpf_reg_state *reg = regs + regno; 3874 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 3875 const char *tname = btf_name_by_offset(reg->btf, t->name_off); 3876 u32 btf_id; 3877 int ret; 3878 3879 if (off < 0) { 3880 verbose(env, 3881 "R%d is ptr_%s invalid negative access: off=%d\n", 3882 regno, tname, off); 3883 return -EACCES; 3884 } 3885 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3886 char tn_buf[48]; 3887 3888 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3889 verbose(env, 3890 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 3891 regno, tname, off, tn_buf); 3892 return -EACCES; 3893 } 3894 3895 if (env->ops->btf_struct_access) { 3896 ret = env->ops->btf_struct_access(&env->log, reg->btf, t, 3897 off, size, atype, &btf_id); 3898 } else { 3899 if (atype != BPF_READ) { 3900 verbose(env, "only read is supported\n"); 3901 return -EACCES; 3902 } 3903 3904 ret = btf_struct_access(&env->log, reg->btf, t, off, size, 3905 atype, &btf_id); 3906 } 3907 3908 if (ret < 0) 3909 return ret; 3910 3911 if (atype == BPF_READ && value_regno >= 0) 3912 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id); 3913 3914 return 0; 3915 } 3916 3917 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 3918 struct bpf_reg_state *regs, 3919 int regno, int off, int size, 3920 enum bpf_access_type atype, 3921 int value_regno) 3922 { 3923 struct bpf_reg_state *reg = regs + regno; 3924 struct bpf_map *map = reg->map_ptr; 3925 const struct btf_type *t; 3926 const char *tname; 3927 u32 btf_id; 3928 int ret; 3929 3930 if (!btf_vmlinux) { 3931 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 3932 return -ENOTSUPP; 3933 } 3934 3935 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 3936 verbose(env, "map_ptr access not supported for map type %d\n", 3937 map->map_type); 3938 return -ENOTSUPP; 3939 } 3940 3941 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 3942 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 3943 3944 if (!env->allow_ptr_to_map_access) { 3945 verbose(env, 3946 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 3947 tname); 3948 return -EPERM; 3949 } 3950 3951 if (off < 0) { 3952 verbose(env, "R%d is %s invalid negative access: off=%d\n", 3953 regno, tname, off); 3954 return -EACCES; 3955 } 3956 3957 if (atype != BPF_READ) { 3958 verbose(env, "only read from %s is supported\n", tname); 3959 return -EACCES; 3960 } 3961 3962 ret = 
btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id); 3963 if (ret < 0) 3964 return ret; 3965 3966 if (value_regno >= 0) 3967 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id); 3968 3969 return 0; 3970 } 3971 3972 /* Check that the stack access at the given offset is within bounds. The 3973 * maximum valid offset is -1. 3974 * 3975 * The minimum valid offset is -MAX_BPF_STACK for writes, and 3976 * -state->allocated_stack for reads. 3977 */ 3978 static int check_stack_slot_within_bounds(int off, 3979 struct bpf_func_state *state, 3980 enum bpf_access_type t) 3981 { 3982 int min_valid_off; 3983 3984 if (t == BPF_WRITE) 3985 min_valid_off = -MAX_BPF_STACK; 3986 else 3987 min_valid_off = -state->allocated_stack; 3988 3989 if (off < min_valid_off || off > -1) 3990 return -EACCES; 3991 return 0; 3992 } 3993 3994 /* Check that the stack access at 'regno + off' falls within the maximum stack 3995 * bounds. 3996 * 3997 * 'off' includes 'regno->off', but not its dynamic part (if any). 3998 */ 3999 static int check_stack_access_within_bounds( 4000 struct bpf_verifier_env *env, 4001 int regno, int off, int access_size, 4002 enum stack_access_src src, enum bpf_access_type type) 4003 { 4004 struct bpf_reg_state *regs = cur_regs(env); 4005 struct bpf_reg_state *reg = regs + regno; 4006 struct bpf_func_state *state = func(env, reg); 4007 int min_off, max_off; 4008 int err; 4009 char *err_extra; 4010 4011 if (src == ACCESS_HELPER) 4012 /* We don't know if helpers are reading or writing (or both). */ 4013 err_extra = " indirect access to"; 4014 else if (type == BPF_READ) 4015 err_extra = " read from"; 4016 else 4017 err_extra = " write to"; 4018 4019 if (tnum_is_const(reg->var_off)) { 4020 min_off = reg->var_off.value + off; 4021 if (access_size > 0) 4022 max_off = min_off + access_size - 1; 4023 else 4024 max_off = min_off; 4025 } else { 4026 if (reg->smax_value >= BPF_MAX_VAR_OFF || 4027 reg->smin_value <= -BPF_MAX_VAR_OFF) { 4028 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", 4029 err_extra, regno); 4030 return -EACCES; 4031 } 4032 min_off = reg->smin_value + off; 4033 if (access_size > 0) 4034 max_off = reg->smax_value + off + access_size - 1; 4035 else 4036 max_off = min_off; 4037 } 4038 4039 err = check_stack_slot_within_bounds(min_off, state, type); 4040 if (!err) 4041 err = check_stack_slot_within_bounds(max_off, state, type); 4042 4043 if (err) { 4044 if (tnum_is_const(reg->var_off)) { 4045 verbose(env, "invalid%s stack R%d off=%d size=%d\n", 4046 err_extra, regno, off, access_size); 4047 } else { 4048 char tn_buf[48]; 4049 4050 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4051 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", 4052 err_extra, regno, tn_buf, access_size); 4053 } 4054 } 4055 return err; 4056 } 4057 4058 /* check whether memory at (regno + off) is accessible for t = (read | write) 4059 * if t==write, value_regno is a register whose value is stored into memory 4060 * if t==read, value_regno is a register that will receive the value from memory 4061 * if t==write && value_regno==-1, some unknown value is stored into memory 4062 * if t==read && value_regno==-1, don't care what we read from memory 4063 */ 4064 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 4065 int off, int bpf_size, enum bpf_access_type t, 4066 int value_regno, bool strict_alignment_once) 4067 { 4068 struct bpf_reg_state *regs = cur_regs(env); 4069 struct bpf_reg_state *reg = regs + regno; 4070 struct
bpf_func_state *state; 4071 int size, err = 0; 4072 4073 size = bpf_size_to_bytes(bpf_size); 4074 if (size < 0) 4075 return size; 4076 4077 /* alignment checks will add in reg->off themselves */ 4078 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 4079 if (err) 4080 return err; 4081 4082 /* for access checks, reg->off is just part of off */ 4083 off += reg->off; 4084 4085 if (reg->type == PTR_TO_MAP_KEY) { 4086 if (t == BPF_WRITE) { 4087 verbose(env, "write to change key R%d not allowed\n", regno); 4088 return -EACCES; 4089 } 4090 4091 err = check_mem_region_access(env, regno, off, size, 4092 reg->map_ptr->key_size, false); 4093 if (err) 4094 return err; 4095 if (value_regno >= 0) 4096 mark_reg_unknown(env, regs, value_regno); 4097 } else if (reg->type == PTR_TO_MAP_VALUE) { 4098 if (t == BPF_WRITE && value_regno >= 0 && 4099 is_pointer_value(env, value_regno)) { 4100 verbose(env, "R%d leaks addr into map\n", value_regno); 4101 return -EACCES; 4102 } 4103 err = check_map_access_type(env, regno, off, size, t); 4104 if (err) 4105 return err; 4106 err = check_map_access(env, regno, off, size, false); 4107 if (!err && t == BPF_READ && value_regno >= 0) { 4108 struct bpf_map *map = reg->map_ptr; 4109 4110 /* if map is read-only, track its contents as scalars */ 4111 if (tnum_is_const(reg->var_off) && 4112 bpf_map_is_rdonly(map) && 4113 map->ops->map_direct_value_addr) { 4114 int map_off = off + reg->var_off.value; 4115 u64 val = 0; 4116 4117 err = bpf_map_direct_read(map, map_off, size, 4118 &val); 4119 if (err) 4120 return err; 4121 4122 regs[value_regno].type = SCALAR_VALUE; 4123 __mark_reg_known(&regs[value_regno], val); 4124 } else { 4125 mark_reg_unknown(env, regs, value_regno); 4126 } 4127 } 4128 } else if (reg->type == PTR_TO_MEM) { 4129 if (t == BPF_WRITE && value_regno >= 0 && 4130 is_pointer_value(env, value_regno)) { 4131 verbose(env, "R%d leaks addr into mem\n", value_regno); 4132 return -EACCES; 4133 } 4134 err = check_mem_region_access(env, regno, off, size, 4135 reg->mem_size, false); 4136 if (!err && t == BPF_READ && value_regno >= 0) 4137 mark_reg_unknown(env, regs, value_regno); 4138 } else if (reg->type == PTR_TO_CTX) { 4139 enum bpf_reg_type reg_type = SCALAR_VALUE; 4140 struct btf *btf = NULL; 4141 u32 btf_id = 0; 4142 4143 if (t == BPF_WRITE && value_regno >= 0 && 4144 is_pointer_value(env, value_regno)) { 4145 verbose(env, "R%d leaks addr into ctx\n", value_regno); 4146 return -EACCES; 4147 } 4148 4149 err = check_ctx_reg(env, reg, regno); 4150 if (err < 0) 4151 return err; 4152 4153 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id); 4154 if (err) 4155 verbose_linfo(env, insn_idx, "; "); 4156 if (!err && t == BPF_READ && value_regno >= 0) { 4157 /* ctx access returns either a scalar, or a 4158 * PTR_TO_PACKET[_META,_END]. In the latter 4159 * case, we know the offset is zero. 4160 */ 4161 if (reg_type == SCALAR_VALUE) { 4162 mark_reg_unknown(env, regs, value_regno); 4163 } else { 4164 mark_reg_known_zero(env, regs, 4165 value_regno); 4166 if (reg_type_may_be_null(reg_type)) 4167 regs[value_regno].id = ++env->id_gen; 4168 /* A load of a ctx field could have an actual 4169 * load size different from the one encoded in 4170 * the insn. When the dst is a PTR, it is for 4171 * sure not a sub-register.
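 * E.g. (an illustrative case) a 32-bit read of the 'data' field of __sk_buff is rewritten into a load of the underlying packet pointer, so the destination ends up as a full 64-bit PTR_TO_PACKET.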
4172 */ 4173 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 4174 if (reg_type == PTR_TO_BTF_ID || 4175 reg_type == PTR_TO_BTF_ID_OR_NULL) { 4176 regs[value_regno].btf = btf; 4177 regs[value_regno].btf_id = btf_id; 4178 } 4179 } 4180 regs[value_regno].type = reg_type; 4181 } 4182 4183 } else if (reg->type == PTR_TO_STACK) { 4184 /* Basic bounds checks. */ 4185 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); 4186 if (err) 4187 return err; 4188 4189 state = func(env, reg); 4190 err = update_stack_depth(env, state, off); 4191 if (err) 4192 return err; 4193 4194 if (t == BPF_READ) 4195 err = check_stack_read(env, regno, off, size, 4196 value_regno); 4197 else 4198 err = check_stack_write(env, regno, off, size, 4199 value_regno, insn_idx); 4200 } else if (reg_is_pkt_pointer(reg)) { 4201 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 4202 verbose(env, "cannot write into packet\n"); 4203 return -EACCES; 4204 } 4205 if (t == BPF_WRITE && value_regno >= 0 && 4206 is_pointer_value(env, value_regno)) { 4207 verbose(env, "R%d leaks addr into packet\n", 4208 value_regno); 4209 return -EACCES; 4210 } 4211 err = check_packet_access(env, regno, off, size, false); 4212 if (!err && t == BPF_READ && value_regno >= 0) 4213 mark_reg_unknown(env, regs, value_regno); 4214 } else if (reg->type == PTR_TO_FLOW_KEYS) { 4215 if (t == BPF_WRITE && value_regno >= 0 && 4216 is_pointer_value(env, value_regno)) { 4217 verbose(env, "R%d leaks addr into flow keys\n", 4218 value_regno); 4219 return -EACCES; 4220 } 4221 4222 err = check_flow_keys_access(env, off, size); 4223 if (!err && t == BPF_READ && value_regno >= 0) 4224 mark_reg_unknown(env, regs, value_regno); 4225 } else if (type_is_sk_pointer(reg->type)) { 4226 if (t == BPF_WRITE) { 4227 verbose(env, "R%d cannot write into %s\n", 4228 regno, reg_type_str[reg->type]); 4229 return -EACCES; 4230 } 4231 err = check_sock_access(env, insn_idx, regno, off, size, t); 4232 if (!err && value_regno >= 0) 4233 mark_reg_unknown(env, regs, value_regno); 4234 } else if (reg->type == PTR_TO_TP_BUFFER) { 4235 err = check_tp_buffer_access(env, reg, regno, off, size); 4236 if (!err && t == BPF_READ && value_regno >= 0) 4237 mark_reg_unknown(env, regs, value_regno); 4238 } else if (reg->type == PTR_TO_BTF_ID) { 4239 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 4240 value_regno); 4241 } else if (reg->type == CONST_PTR_TO_MAP) { 4242 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 4243 value_regno); 4244 } else if (reg->type == PTR_TO_RDONLY_BUF) { 4245 if (t == BPF_WRITE) { 4246 verbose(env, "R%d cannot write into %s\n", 4247 regno, reg_type_str[reg->type]); 4248 return -EACCES; 4249 } 4250 err = check_buffer_access(env, reg, regno, off, size, false, 4251 "rdonly", 4252 &env->prog->aux->max_rdonly_access); 4253 if (!err && value_regno >= 0) 4254 mark_reg_unknown(env, regs, value_regno); 4255 } else if (reg->type == PTR_TO_RDWR_BUF) { 4256 err = check_buffer_access(env, reg, regno, off, size, false, 4257 "rdwr", 4258 &env->prog->aux->max_rdwr_access); 4259 if (!err && t == BPF_READ && value_regno >= 0) 4260 mark_reg_unknown(env, regs, value_regno); 4261 } else { 4262 verbose(env, "R%d invalid mem access '%s'\n", regno, 4263 reg_type_str[reg->type]); 4264 return -EACCES; 4265 } 4266 4267 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 4268 regs[value_regno].type == SCALAR_VALUE) { 4269 /* b/h/w load zero-extends, mark upper bits as known 0 */ 4270 coerce_reg_to_size(&regs[value_regno],
size); 4271 } 4272 return err; 4273 } 4274 4275 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 4276 { 4277 int load_reg; 4278 int err; 4279 4280 switch (insn->imm) { 4281 case BPF_ADD: 4282 case BPF_ADD | BPF_FETCH: 4283 case BPF_AND: 4284 case BPF_AND | BPF_FETCH: 4285 case BPF_OR: 4286 case BPF_OR | BPF_FETCH: 4287 case BPF_XOR: 4288 case BPF_XOR | BPF_FETCH: 4289 case BPF_XCHG: 4290 case BPF_CMPXCHG: 4291 break; 4292 default: 4293 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); 4294 return -EINVAL; 4295 } 4296 4297 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { 4298 verbose(env, "invalid atomic operand size\n"); 4299 return -EINVAL; 4300 } 4301 4302 /* check src1 operand */ 4303 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4304 if (err) 4305 return err; 4306 4307 /* check src2 operand */ 4308 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4309 if (err) 4310 return err; 4311 4312 if (insn->imm == BPF_CMPXCHG) { 4313 /* Check comparison of R0 with memory location */ 4314 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 4315 if (err) 4316 return err; 4317 } 4318 4319 if (is_pointer_value(env, insn->src_reg)) { 4320 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 4321 return -EACCES; 4322 } 4323 4324 if (is_ctx_reg(env, insn->dst_reg) || 4325 is_pkt_reg(env, insn->dst_reg) || 4326 is_flow_key_reg(env, insn->dst_reg) || 4327 is_sk_reg(env, insn->dst_reg)) { 4328 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", 4329 insn->dst_reg, 4330 reg_type_str[reg_state(env, insn->dst_reg)->type]); 4331 return -EACCES; 4332 } 4333 4334 if (insn->imm & BPF_FETCH) { 4335 if (insn->imm == BPF_CMPXCHG) 4336 load_reg = BPF_REG_0; 4337 else 4338 load_reg = insn->src_reg; 4339 4340 /* check and record load of old value */ 4341 err = check_reg_arg(env, load_reg, DST_OP); 4342 if (err) 4343 return err; 4344 } else { 4345 /* This instruction accesses a memory location but doesn't 4346 * actually load it into a register. 4347 */ 4348 load_reg = -1; 4349 } 4350 4351 /* check whether we can read the memory */ 4352 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4353 BPF_SIZE(insn->code), BPF_READ, load_reg, true); 4354 if (err) 4355 return err; 4356 4357 /* check whether we can write into the same memory */ 4358 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4359 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 4360 if (err) 4361 return err; 4362 4363 return 0; 4364 } 4365 4366 /* When register 'regno' is used to read the stack (either directly or through 4367 * a helper function) make sure that it's within stack boundary and, depending 4368 * on the access type, that all elements of the stack are initialized. 4369 * 4370 * 'off' includes 'regno->off', but not its dynamic part (if any). 4371 * 4372 * All registers that have been spilled on the stack in the slots within the 4373 * read offsets are marked as read. 4374 */ 4375 static int check_stack_range_initialized( 4376 struct bpf_verifier_env *env, int regno, int off, 4377 int access_size, bool zero_size_allowed, 4378 enum stack_access_src type, struct bpf_call_arg_meta *meta) 4379 { 4380 struct bpf_reg_state *reg = reg_state(env, regno); 4381 struct bpf_func_state *state = func(env, reg); 4382 int err, min_off, max_off, i, j, slot, spi; 4383 char *err_extra = type == ACCESS_HELPER ? 
" indirect" : ""; 4384 enum bpf_access_type bounds_check_type; 4385 /* Some accesses can write anything into the stack, others are 4386 * read-only. 4387 */ 4388 bool clobber = false; 4389 4390 if (access_size == 0 && !zero_size_allowed) { 4391 verbose(env, "invalid zero-sized read\n"); 4392 return -EACCES; 4393 } 4394 4395 if (type == ACCESS_HELPER) { 4396 /* The bounds checks for writes are more permissive than for 4397 * reads. However, if raw_mode is not set, we'll do extra 4398 * checks below. 4399 */ 4400 bounds_check_type = BPF_WRITE; 4401 clobber = true; 4402 } else { 4403 bounds_check_type = BPF_READ; 4404 } 4405 err = check_stack_access_within_bounds(env, regno, off, access_size, 4406 type, bounds_check_type); 4407 if (err) 4408 return err; 4409 4410 4411 if (tnum_is_const(reg->var_off)) { 4412 min_off = max_off = reg->var_off.value + off; 4413 } else { 4414 /* Variable offset is prohibited for unprivileged mode for 4415 * simplicity since it requires corresponding support in 4416 * Spectre masking for stack ALU. 4417 * See also retrieve_ptr_limit(). 4418 */ 4419 if (!env->bypass_spec_v1) { 4420 char tn_buf[48]; 4421 4422 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4423 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n", 4424 regno, err_extra, tn_buf); 4425 return -EACCES; 4426 } 4427 /* Only initialized buffer on stack is allowed to be accessed 4428 * with variable offset. With uninitialized buffer it's hard to 4429 * guarantee that whole memory is marked as initialized on 4430 * helper return since specific bounds are unknown what may 4431 * cause uninitialized stack leaking. 4432 */ 4433 if (meta && meta->raw_mode) 4434 meta = NULL; 4435 4436 min_off = reg->smin_value + off; 4437 max_off = reg->smax_value + off; 4438 } 4439 4440 if (meta && meta->raw_mode) { 4441 meta->access_size = access_size; 4442 meta->regno = regno; 4443 return 0; 4444 } 4445 4446 for (i = min_off; i < max_off + access_size; i++) { 4447 u8 *stype; 4448 4449 slot = -i - 1; 4450 spi = slot / BPF_REG_SIZE; 4451 if (state->allocated_stack <= slot) 4452 goto err; 4453 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 4454 if (*stype == STACK_MISC) 4455 goto mark; 4456 if (*stype == STACK_ZERO) { 4457 if (clobber) { 4458 /* helper can write anything into the stack */ 4459 *stype = STACK_MISC; 4460 } 4461 goto mark; 4462 } 4463 4464 if (state->stack[spi].slot_type[0] == STACK_SPILL && 4465 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) 4466 goto mark; 4467 4468 if (state->stack[spi].slot_type[0] == STACK_SPILL && 4469 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || 4470 env->allow_ptr_leaks)) { 4471 if (clobber) { 4472 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 4473 for (j = 0; j < BPF_REG_SIZE; j++) 4474 state->stack[spi].slot_type[j] = STACK_MISC; 4475 } 4476 goto mark; 4477 } 4478 4479 err: 4480 if (tnum_is_const(reg->var_off)) { 4481 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n", 4482 err_extra, regno, min_off, i - min_off, access_size); 4483 } else { 4484 char tn_buf[48]; 4485 4486 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4487 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n", 4488 err_extra, regno, tn_buf, i - min_off, access_size); 4489 } 4490 return -EACCES; 4491 mark: 4492 /* reading any byte out of 8-byte 'spill_slot' will cause 4493 * the whole slot to be marked as 'read' 4494 */ 4495 mark_reg_read(env, &state->stack[spi].spilled_ptr, 4496 state->stack[spi].spilled_ptr.parent, 4497 
REG_LIVE_READ64); 4498 } 4499 return update_stack_depth(env, state, min_off); 4500 } 4501 4502 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 4503 int access_size, bool zero_size_allowed, 4504 struct bpf_call_arg_meta *meta) 4505 { 4506 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 4507 4508 switch (reg->type) { 4509 case PTR_TO_PACKET: 4510 case PTR_TO_PACKET_META: 4511 return check_packet_access(env, regno, reg->off, access_size, 4512 zero_size_allowed); 4513 case PTR_TO_MAP_KEY: 4514 return check_mem_region_access(env, regno, reg->off, access_size, 4515 reg->map_ptr->key_size, false); 4516 case PTR_TO_MAP_VALUE: 4517 if (check_map_access_type(env, regno, reg->off, access_size, 4518 meta && meta->raw_mode ? BPF_WRITE : 4519 BPF_READ)) 4520 return -EACCES; 4521 return check_map_access(env, regno, reg->off, access_size, 4522 zero_size_allowed); 4523 case PTR_TO_MEM: 4524 return check_mem_region_access(env, regno, reg->off, 4525 access_size, reg->mem_size, 4526 zero_size_allowed); 4527 case PTR_TO_RDONLY_BUF: 4528 if (meta && meta->raw_mode) 4529 return -EACCES; 4530 return check_buffer_access(env, reg, regno, reg->off, 4531 access_size, zero_size_allowed, 4532 "rdonly", 4533 &env->prog->aux->max_rdonly_access); 4534 case PTR_TO_RDWR_BUF: 4535 return check_buffer_access(env, reg, regno, reg->off, 4536 access_size, zero_size_allowed, 4537 "rdwr", 4538 &env->prog->aux->max_rdwr_access); 4539 case PTR_TO_STACK: 4540 return check_stack_range_initialized( 4541 env, 4542 regno, reg->off, access_size, 4543 zero_size_allowed, ACCESS_HELPER, meta); 4544 default: /* scalar_value or invalid ptr */ 4545 /* Allow zero-byte read from NULL, regardless of pointer type */ 4546 if (zero_size_allowed && access_size == 0 && 4547 register_is_null(reg)) 4548 return 0; 4549 4550 verbose(env, "R%d type=%s expected=%s\n", regno, 4551 reg_type_str[reg->type], 4552 reg_type_str[PTR_TO_STACK]); 4553 return -EACCES; 4554 } 4555 } 4556 4557 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 4558 u32 regno, u32 mem_size) 4559 { 4560 if (register_is_null(reg)) 4561 return 0; 4562 4563 if (reg_type_may_be_null(reg->type)) { 4564 /* Assuming that the register contains a value, check if the memory 4565 * access is safe. Temporarily save and restore the register's state as 4566 * the conversion shouldn't be visible to a caller. 4567 */ 4568 const struct bpf_reg_state saved_reg = *reg; 4569 int rv; 4570 4571 mark_ptr_not_null_reg(reg); 4572 rv = check_helper_mem_access(env, regno, mem_size, true, NULL); 4573 *reg = saved_reg; 4574 return rv; 4575 } 4576 4577 return check_helper_mem_access(env, regno, mem_size, true, NULL); 4578 } 4579 4580 /* Implementation details: 4581 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL 4582 * Two bpf_map_lookups (even with the same key) will have different reg->id. 4583 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after 4584 * value_or_null->value transition, since the verifier only cares about 4585 * the range of access to valid map value pointer and doesn't care about actual 4586 * address of the map element. 4587 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 4588 * reg->id > 0 after value_or_null->value transition. By doing so 4589 * two bpf_map_lookups will be considered two different pointers that 4590 * point to different bpf_spin_locks. 4591 * The verifier allows taking only one bpf_spin_lock at a time to avoid 4592 * dead-locks.
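 * As a sketch (hypothetical map 'm' and keys 'k1'/'k2'): given * v1 = bpf_map_lookup_elem(&m, &k1); * v2 = bpf_map_lookup_elem(&m, &k2); * a bpf_spin_lock(&v1->lock) followed by bpf_spin_lock(&v2->lock) is rejected, because the two lookups carry different reg->id.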
4593 * Since only one bpf_spin_lock is allowed the checks are simpler than 4594 * reg_is_refcounted() logic. The verifier needs to remember only 4595 * one spin_lock instead of array of acquired_refs. 4596 * cur_state->active_spin_lock remembers which map value element got locked 4597 * and clears it after bpf_spin_unlock. 4598 */ 4599 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 4600 bool is_lock) 4601 { 4602 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 4603 struct bpf_verifier_state *cur = env->cur_state; 4604 bool is_const = tnum_is_const(reg->var_off); 4605 struct bpf_map *map = reg->map_ptr; 4606 u64 val = reg->var_off.value; 4607 4608 if (!is_const) { 4609 verbose(env, 4610 "R%d doesn't have a constant offset. bpf_spin_lock has to be at a constant offset\n", 4611 regno); 4612 return -EINVAL; 4613 } 4614 if (!map->btf) { 4615 verbose(env, 4616 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 4617 map->name); 4618 return -EINVAL; 4619 } 4620 if (!map_value_has_spin_lock(map)) { 4621 if (map->spin_lock_off == -E2BIG) 4622 verbose(env, 4623 "map '%s' has more than one 'struct bpf_spin_lock'\n", 4624 map->name); 4625 else if (map->spin_lock_off == -ENOENT) 4626 verbose(env, 4627 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 4628 map->name); 4629 else 4630 verbose(env, 4631 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 4632 map->name); 4633 return -EINVAL; 4634 } 4635 if (map->spin_lock_off != val + reg->off) { 4636 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 4637 val + reg->off); 4638 return -EINVAL; 4639 } 4640 if (is_lock) { 4641 if (cur->active_spin_lock) { 4642 verbose(env, 4643 "Locking two bpf_spin_locks is not allowed\n"); 4644 return -EINVAL; 4645 } 4646 cur->active_spin_lock = reg->id; 4647 } else { 4648 if (!cur->active_spin_lock) { 4649 verbose(env, "bpf_spin_unlock without taking a lock\n"); 4650 return -EINVAL; 4651 } 4652 if (cur->active_spin_lock != reg->id) { 4653 verbose(env, "bpf_spin_unlock of different lock\n"); 4654 return -EINVAL; 4655 } 4656 cur->active_spin_lock = 0; 4657 } 4658 return 0; 4659 } 4660 4661 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 4662 { 4663 return type == ARG_PTR_TO_MEM || 4664 type == ARG_PTR_TO_MEM_OR_NULL || 4665 type == ARG_PTR_TO_UNINIT_MEM; 4666 } 4667 4668 static bool arg_type_is_mem_size(enum bpf_arg_type type) 4669 { 4670 return type == ARG_CONST_SIZE || 4671 type == ARG_CONST_SIZE_OR_ZERO; 4672 } 4673 4674 static bool arg_type_is_alloc_size(enum bpf_arg_type type) 4675 { 4676 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; 4677 } 4678 4679 static bool arg_type_is_int_ptr(enum bpf_arg_type type) 4680 { 4681 return type == ARG_PTR_TO_INT || 4682 type == ARG_PTR_TO_LONG; 4683 } 4684 4685 static int int_ptr_type_to_size(enum bpf_arg_type type) 4686 { 4687 if (type == ARG_PTR_TO_INT) 4688 return sizeof(u32); 4689 else if (type == ARG_PTR_TO_LONG) 4690 return sizeof(u64); 4691 4692 return -EINVAL; 4693 } 4694 4695 static int resolve_map_arg_type(struct bpf_verifier_env *env, 4696 const struct bpf_call_arg_meta *meta, 4697 enum bpf_arg_type *arg_type) 4698 { 4699 if (!meta->map_ptr) { 4700 /* kernel subsystem misconfigured verifier */ 4701 verbose(env, "invalid map_ptr to access map->type\n"); 4702 return -EACCES; 4703 } 4704 4705 switch (meta->map_ptr->map_type) { 4706 case BPF_MAP_TYPE_SOCKMAP: 4707 case BPF_MAP_TYPE_SOCKHASH: 4708 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 4709 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 4710 } else {
4711 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 4712 return -EINVAL; 4713 } 4714 break; 4715 4716 default: 4717 break; 4718 } 4719 return 0; 4720 } 4721 4722 struct bpf_reg_types { 4723 const enum bpf_reg_type types[10]; 4724 u32 *btf_id; 4725 }; 4726 4727 static const struct bpf_reg_types map_key_value_types = { 4728 .types = { 4729 PTR_TO_STACK, 4730 PTR_TO_PACKET, 4731 PTR_TO_PACKET_META, 4732 PTR_TO_MAP_KEY, 4733 PTR_TO_MAP_VALUE, 4734 }, 4735 }; 4736 4737 static const struct bpf_reg_types sock_types = { 4738 .types = { 4739 PTR_TO_SOCK_COMMON, 4740 PTR_TO_SOCKET, 4741 PTR_TO_TCP_SOCK, 4742 PTR_TO_XDP_SOCK, 4743 }, 4744 }; 4745 4746 #ifdef CONFIG_NET 4747 static const struct bpf_reg_types btf_id_sock_common_types = { 4748 .types = { 4749 PTR_TO_SOCK_COMMON, 4750 PTR_TO_SOCKET, 4751 PTR_TO_TCP_SOCK, 4752 PTR_TO_XDP_SOCK, 4753 PTR_TO_BTF_ID, 4754 }, 4755 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 4756 }; 4757 #endif 4758 4759 static const struct bpf_reg_types mem_types = { 4760 .types = { 4761 PTR_TO_STACK, 4762 PTR_TO_PACKET, 4763 PTR_TO_PACKET_META, 4764 PTR_TO_MAP_KEY, 4765 PTR_TO_MAP_VALUE, 4766 PTR_TO_MEM, 4767 PTR_TO_RDONLY_BUF, 4768 PTR_TO_RDWR_BUF, 4769 }, 4770 }; 4771 4772 static const struct bpf_reg_types int_ptr_types = { 4773 .types = { 4774 PTR_TO_STACK, 4775 PTR_TO_PACKET, 4776 PTR_TO_PACKET_META, 4777 PTR_TO_MAP_KEY, 4778 PTR_TO_MAP_VALUE, 4779 }, 4780 }; 4781 4782 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; 4783 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; 4784 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; 4785 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } }; 4786 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; 4787 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } }; 4788 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } }; 4789 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } }; 4790 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; 4791 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; 4792 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; 4793 4794 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { 4795 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, 4796 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types, 4797 [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types, 4798 [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types, 4799 [ARG_CONST_SIZE] = &scalar_types, 4800 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, 4801 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, 4802 [ARG_CONST_MAP_PTR] = &const_map_ptr_types, 4803 [ARG_PTR_TO_CTX] = &context_types, 4804 [ARG_PTR_TO_CTX_OR_NULL] = &context_types, 4805 [ARG_PTR_TO_SOCK_COMMON] = &sock_types, 4806 #ifdef CONFIG_NET 4807 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, 4808 #endif 4809 [ARG_PTR_TO_SOCKET] = &fullsock_types, 4810 [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types, 4811 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, 4812 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, 4813 [ARG_PTR_TO_MEM] = &mem_types, 4814 [ARG_PTR_TO_MEM_OR_NULL] = &mem_types, 4815 [ARG_PTR_TO_UNINIT_MEM] = &mem_types, 4816 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types, 4817 [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = 
&alloc_mem_types, 4818 [ARG_PTR_TO_INT] = &int_ptr_types, 4819 [ARG_PTR_TO_LONG] = &int_ptr_types, 4820 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, 4821 [ARG_PTR_TO_FUNC] = &func_ptr_types, 4822 [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types, 4823 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, 4824 }; 4825 4826 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, 4827 enum bpf_arg_type arg_type, 4828 const u32 *arg_btf_id) 4829 { 4830 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 4831 enum bpf_reg_type expected, type = reg->type; 4832 const struct bpf_reg_types *compatible; 4833 int i, j; 4834 4835 compatible = compatible_reg_types[arg_type]; 4836 if (!compatible) { 4837 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); 4838 return -EFAULT; 4839 } 4840 4841 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { 4842 expected = compatible->types[i]; 4843 if (expected == NOT_INIT) 4844 break; 4845 4846 if (type == expected) 4847 goto found; 4848 } 4849 4850 verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]); 4851 for (j = 0; j + 1 < i; j++) 4852 verbose(env, "%s, ", reg_type_str[compatible->types[j]]); 4853 verbose(env, "%s\n", reg_type_str[compatible->types[j]]); 4854 return -EACCES; 4855 4856 found: 4857 if (type == PTR_TO_BTF_ID) { 4858 if (!arg_btf_id) { 4859 if (!compatible->btf_id) { 4860 verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); 4861 return -EFAULT; 4862 } 4863 arg_btf_id = compatible->btf_id; 4864 } 4865 4866 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 4867 btf_vmlinux, *arg_btf_id)) { 4868 verbose(env, "R%d is of type %s but %s is expected\n", 4869 regno, kernel_type_name(reg->btf, reg->btf_id), 4870 kernel_type_name(btf_vmlinux, *arg_btf_id)); 4871 return -EACCES; 4872 } 4873 4874 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 4875 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", 4876 regno); 4877 return -EACCES; 4878 } 4879 } 4880 4881 return 0; 4882 } 4883 4884 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, 4885 struct bpf_call_arg_meta *meta, 4886 const struct bpf_func_proto *fn) 4887 { 4888 u32 regno = BPF_REG_1 + arg; 4889 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 4890 enum bpf_arg_type arg_type = fn->arg_type[arg]; 4891 enum bpf_reg_type type = reg->type; 4892 int err = 0; 4893 4894 if (arg_type == ARG_DONTCARE) 4895 return 0; 4896 4897 err = check_reg_arg(env, regno, SRC_OP); 4898 if (err) 4899 return err; 4900 4901 if (arg_type == ARG_ANYTHING) { 4902 if (is_pointer_value(env, regno)) { 4903 verbose(env, "R%d leaks addr into helper function\n", 4904 regno); 4905 return -EACCES; 4906 } 4907 return 0; 4908 } 4909 4910 if (type_is_pkt_pointer(type) && 4911 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 4912 verbose(env, "helper access to the packet is not allowed\n"); 4913 return -EACCES; 4914 } 4915 4916 if (arg_type == ARG_PTR_TO_MAP_VALUE || 4917 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || 4918 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { 4919 err = resolve_map_arg_type(env, meta, &arg_type); 4920 if (err) 4921 return err; 4922 } 4923 4924 if (register_is_null(reg) && arg_type_may_be_null(arg_type)) 4925 /* A NULL register has a SCALAR_VALUE type, so skip 4926 * type checking.
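 * (E.g. passing a literal NULL as an ARG_PTR_TO_MEM_OR_NULL argument is accepted here even though the register is really a scalar zero.)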
4927 */ 4928 goto skip_type_check; 4929 4930 err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]); 4931 if (err) 4932 return err; 4933 4934 if (type == PTR_TO_CTX) { 4935 err = check_ctx_reg(env, reg, regno); 4936 if (err < 0) 4937 return err; 4938 } 4939 4940 skip_type_check: 4941 if (reg->ref_obj_id) { 4942 if (meta->ref_obj_id) { 4943 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 4944 regno, reg->ref_obj_id, 4945 meta->ref_obj_id); 4946 return -EFAULT; 4947 } 4948 meta->ref_obj_id = reg->ref_obj_id; 4949 } 4950 4951 if (arg_type == ARG_CONST_MAP_PTR) { 4952 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 4953 meta->map_ptr = reg->map_ptr; 4954 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 4955 /* bpf_map_xxx(..., map_ptr, ..., key) call: 4956 * check that [key, key + map->key_size) are within 4957 * stack limits and initialized 4958 */ 4959 if (!meta->map_ptr) { 4960 /* in the function declaration, map_ptr must come before 4961 * map_key, so that it's verified and known before 4962 * we have to check map_key here. Otherwise it means 4963 * that the kernel subsystem misconfigured the verifier 4964 */ 4965 verbose(env, "invalid map_ptr to access map->key\n"); 4966 return -EACCES; 4967 } 4968 err = check_helper_mem_access(env, regno, 4969 meta->map_ptr->key_size, false, 4970 NULL); 4971 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 4972 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 4973 !register_is_null(reg)) || 4974 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 4975 /* bpf_map_xxx(..., map_ptr, ..., value) call: 4976 * check [value, value + map->value_size) validity 4977 */ 4978 if (!meta->map_ptr) { 4979 /* kernel subsystem misconfigured verifier */ 4980 verbose(env, "invalid map_ptr to access map->value\n"); 4981 return -EACCES; 4982 } 4983 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 4984 err = check_helper_mem_access(env, regno, 4985 meta->map_ptr->value_size, false, 4986 meta); 4987 } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) { 4988 if (!reg->btf_id) { 4989 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 4990 return -EACCES; 4991 } 4992 meta->ret_btf = reg->btf; 4993 meta->ret_btf_id = reg->btf_id; 4994 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 4995 if (meta->func_id == BPF_FUNC_spin_lock) { 4996 if (process_spin_lock(env, regno, true)) 4997 return -EACCES; 4998 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 4999 if (process_spin_lock(env, regno, false)) 5000 return -EACCES; 5001 } else { 5002 verbose(env, "verifier internal error\n"); 5003 return -EFAULT; 5004 } 5005 } else if (arg_type == ARG_PTR_TO_FUNC) { 5006 meta->subprogno = reg->subprogno; 5007 } else if (arg_type_is_mem_ptr(arg_type)) { 5008 /* The access to this pointer is only checked when we hit the 5009 * next is_mem_size argument below. 5010 */ 5011 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM); 5012 } else if (arg_type_is_mem_size(arg_type)) { 5013 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 5014 5015 /* This is used to refine r0 return value bounds for helpers 5016 * that enforce this value as an upper bound on return values. 5017 * See do_refine_retval_range() for helpers that can refine 5018 * the return value. The C type of the helper is u32, so we pull 5019 * the register bound from umax_value; however, if it is negative 5020 * the verifier errors out. Only upper bounds can be learned 5021 * because the retval is an int type and negative retvals are allowed.
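 * * Sketch (assuming a helper covered by do_refine_retval_range(), e.g. bpf_get_stack()): if the size argument has umax_value == 64, r0's upper bound on the success path can be clamped to 64.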
5022 */ 5023 meta->msize_max_value = reg->umax_value; 5024 5025 /* The register is SCALAR_VALUE; the access check 5026 * happens using its boundaries. 5027 */ 5028 if (!tnum_is_const(reg->var_off)) 5029 /* For unprivileged variable accesses, disable raw 5030 * mode so that the program is required to 5031 * initialize all the memory that the helper could 5032 * just partially fill up. 5033 */ 5034 meta = NULL; 5035 5036 if (reg->smin_value < 0) { 5037 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 5038 regno); 5039 return -EACCES; 5040 } 5041 5042 if (reg->umin_value == 0) { 5043 err = check_helper_mem_access(env, regno - 1, 0, 5044 zero_size_allowed, 5045 meta); 5046 if (err) 5047 return err; 5048 } 5049 5050 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 5051 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 5052 regno); 5053 return -EACCES; 5054 } 5055 err = check_helper_mem_access(env, regno - 1, 5056 reg->umax_value, 5057 zero_size_allowed, meta); 5058 if (!err) 5059 err = mark_chain_precision(env, regno); 5060 } else if (arg_type_is_alloc_size(arg_type)) { 5061 if (!tnum_is_const(reg->var_off)) { 5062 verbose(env, "R%d is not a known constant\n", 5063 regno); 5064 return -EACCES; 5065 } 5066 meta->mem_size = reg->var_off.value; 5067 } else if (arg_type_is_int_ptr(arg_type)) { 5068 int size = int_ptr_type_to_size(arg_type); 5069 5070 err = check_helper_mem_access(env, regno, size, false, meta); 5071 if (err) 5072 return err; 5073 err = check_ptr_alignment(env, reg, 0, size, true); 5074 } else if (arg_type == ARG_PTR_TO_CONST_STR) { 5075 struct bpf_map *map = reg->map_ptr; 5076 int map_off; 5077 u64 map_addr; 5078 char *str_ptr; 5079 5080 if (!bpf_map_is_rdonly(map)) { 5081 verbose(env, "R%d does not point to a readonly map\n", regno); 5082 return -EACCES; 5083 } 5084 5085 if (!tnum_is_const(reg->var_off)) { 5086 verbose(env, "R%d is not a constant address\n", regno); 5087 return -EACCES; 5088 } 5089 5090 if (!map->ops->map_direct_value_addr) { 5091 verbose(env, "no direct value access support for this map type\n"); 5092 return -EACCES; 5093 } 5094 5095 err = check_map_access(env, regno, reg->off, 5096 map->value_size - reg->off, false); 5097 if (err) 5098 return err; 5099 5100 map_off = reg->off + reg->var_off.value; 5101 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); 5102 if (err) { 5103 verbose(env, "direct value access on string failed\n"); 5104 return err; 5105 } 5106 5107 str_ptr = (char *)(long)(map_addr); 5108 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { 5109 verbose(env, "string is not zero-terminated\n"); 5110 return -EINVAL; 5111 } 5112 } 5113 5114 return err; 5115 } 5116 5117 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 5118 { 5119 enum bpf_attach_type eatype = env->prog->expected_attach_type; 5120 enum bpf_prog_type type = resolve_prog_type(env->prog); 5121 5122 if (func_id != BPF_FUNC_map_update_elem) 5123 return false; 5124 5125 /* It's not possible to get access to a locked struct sock in these 5126 * contexts, so updating is safe.
5127 */ 5128 switch (type) { 5129 case BPF_PROG_TYPE_TRACING: 5130 if (eatype == BPF_TRACE_ITER) 5131 return true; 5132 break; 5133 case BPF_PROG_TYPE_SOCKET_FILTER: 5134 case BPF_PROG_TYPE_SCHED_CLS: 5135 case BPF_PROG_TYPE_SCHED_ACT: 5136 case BPF_PROG_TYPE_XDP: 5137 case BPF_PROG_TYPE_SK_REUSEPORT: 5138 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5139 case BPF_PROG_TYPE_SK_LOOKUP: 5140 return true; 5141 default: 5142 break; 5143 } 5144 5145 verbose(env, "cannot update sockmap in this context\n"); 5146 return false; 5147 } 5148 5149 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 5150 { 5151 return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64); 5152 } 5153 5154 static int check_map_func_compatibility(struct bpf_verifier_env *env, 5155 struct bpf_map *map, int func_id) 5156 { 5157 if (!map) 5158 return 0; 5159 5160 /* We need a two way check, first is from map perspective ... */ 5161 switch (map->map_type) { 5162 case BPF_MAP_TYPE_PROG_ARRAY: 5163 if (func_id != BPF_FUNC_tail_call) 5164 goto error; 5165 break; 5166 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 5167 if (func_id != BPF_FUNC_perf_event_read && 5168 func_id != BPF_FUNC_perf_event_output && 5169 func_id != BPF_FUNC_skb_output && 5170 func_id != BPF_FUNC_perf_event_read_value && 5171 func_id != BPF_FUNC_xdp_output) 5172 goto error; 5173 break; 5174 case BPF_MAP_TYPE_RINGBUF: 5175 if (func_id != BPF_FUNC_ringbuf_output && 5176 func_id != BPF_FUNC_ringbuf_reserve && 5177 func_id != BPF_FUNC_ringbuf_submit && 5178 func_id != BPF_FUNC_ringbuf_discard && 5179 func_id != BPF_FUNC_ringbuf_query) 5180 goto error; 5181 break; 5182 case BPF_MAP_TYPE_STACK_TRACE: 5183 if (func_id != BPF_FUNC_get_stackid) 5184 goto error; 5185 break; 5186 case BPF_MAP_TYPE_CGROUP_ARRAY: 5187 if (func_id != BPF_FUNC_skb_under_cgroup && 5188 func_id != BPF_FUNC_current_task_under_cgroup) 5189 goto error; 5190 break; 5191 case BPF_MAP_TYPE_CGROUP_STORAGE: 5192 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 5193 if (func_id != BPF_FUNC_get_local_storage) 5194 goto error; 5195 break; 5196 case BPF_MAP_TYPE_DEVMAP: 5197 case BPF_MAP_TYPE_DEVMAP_HASH: 5198 if (func_id != BPF_FUNC_redirect_map && 5199 func_id != BPF_FUNC_map_lookup_elem) 5200 goto error; 5201 break; 5202 /* Restrict bpf side of cpumap and xskmap, open when use-cases 5203 * appear. 
5204 */ 5205 case BPF_MAP_TYPE_CPUMAP: 5206 if (func_id != BPF_FUNC_redirect_map) 5207 goto error; 5208 break; 5209 case BPF_MAP_TYPE_XSKMAP: 5210 if (func_id != BPF_FUNC_redirect_map && 5211 func_id != BPF_FUNC_map_lookup_elem) 5212 goto error; 5213 break; 5214 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 5215 case BPF_MAP_TYPE_HASH_OF_MAPS: 5216 if (func_id != BPF_FUNC_map_lookup_elem) 5217 goto error; 5218 break; 5219 case BPF_MAP_TYPE_SOCKMAP: 5220 if (func_id != BPF_FUNC_sk_redirect_map && 5221 func_id != BPF_FUNC_sock_map_update && 5222 func_id != BPF_FUNC_map_delete_elem && 5223 func_id != BPF_FUNC_msg_redirect_map && 5224 func_id != BPF_FUNC_sk_select_reuseport && 5225 func_id != BPF_FUNC_map_lookup_elem && 5226 !may_update_sockmap(env, func_id)) 5227 goto error; 5228 break; 5229 case BPF_MAP_TYPE_SOCKHASH: 5230 if (func_id != BPF_FUNC_sk_redirect_hash && 5231 func_id != BPF_FUNC_sock_hash_update && 5232 func_id != BPF_FUNC_map_delete_elem && 5233 func_id != BPF_FUNC_msg_redirect_hash && 5234 func_id != BPF_FUNC_sk_select_reuseport && 5235 func_id != BPF_FUNC_map_lookup_elem && 5236 !may_update_sockmap(env, func_id)) 5237 goto error; 5238 break; 5239 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 5240 if (func_id != BPF_FUNC_sk_select_reuseport) 5241 goto error; 5242 break; 5243 case BPF_MAP_TYPE_QUEUE: 5244 case BPF_MAP_TYPE_STACK: 5245 if (func_id != BPF_FUNC_map_peek_elem && 5246 func_id != BPF_FUNC_map_pop_elem && 5247 func_id != BPF_FUNC_map_push_elem) 5248 goto error; 5249 break; 5250 case BPF_MAP_TYPE_SK_STORAGE: 5251 if (func_id != BPF_FUNC_sk_storage_get && 5252 func_id != BPF_FUNC_sk_storage_delete) 5253 goto error; 5254 break; 5255 case BPF_MAP_TYPE_INODE_STORAGE: 5256 if (func_id != BPF_FUNC_inode_storage_get && 5257 func_id != BPF_FUNC_inode_storage_delete) 5258 goto error; 5259 break; 5260 case BPF_MAP_TYPE_TASK_STORAGE: 5261 if (func_id != BPF_FUNC_task_storage_get && 5262 func_id != BPF_FUNC_task_storage_delete) 5263 goto error; 5264 break; 5265 default: 5266 break; 5267 } 5268 5269 /* ... and second from the function itself. 
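 * E.g. (an illustrative pairing of the two switches) a BPF_MAP_TYPE_PROG_ARRAY may only be used with bpf_tail_call(), and, symmetrically, bpf_tail_call() only accepts a prog array.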
*/ 5270 switch (func_id) { 5271 case BPF_FUNC_tail_call: 5272 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 5273 goto error; 5274 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 5275 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 5276 return -EINVAL; 5277 } 5278 break; 5279 case BPF_FUNC_perf_event_read: 5280 case BPF_FUNC_perf_event_output: 5281 case BPF_FUNC_perf_event_read_value: 5282 case BPF_FUNC_skb_output: 5283 case BPF_FUNC_xdp_output: 5284 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 5285 goto error; 5286 break; 5287 case BPF_FUNC_get_stackid: 5288 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 5289 goto error; 5290 break; 5291 case BPF_FUNC_current_task_under_cgroup: 5292 case BPF_FUNC_skb_under_cgroup: 5293 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 5294 goto error; 5295 break; 5296 case BPF_FUNC_redirect_map: 5297 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 5298 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 5299 map->map_type != BPF_MAP_TYPE_CPUMAP && 5300 map->map_type != BPF_MAP_TYPE_XSKMAP) 5301 goto error; 5302 break; 5303 case BPF_FUNC_sk_redirect_map: 5304 case BPF_FUNC_msg_redirect_map: 5305 case BPF_FUNC_sock_map_update: 5306 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 5307 goto error; 5308 break; 5309 case BPF_FUNC_sk_redirect_hash: 5310 case BPF_FUNC_msg_redirect_hash: 5311 case BPF_FUNC_sock_hash_update: 5312 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 5313 goto error; 5314 break; 5315 case BPF_FUNC_get_local_storage: 5316 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 5317 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 5318 goto error; 5319 break; 5320 case BPF_FUNC_sk_select_reuseport: 5321 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 5322 map->map_type != BPF_MAP_TYPE_SOCKMAP && 5323 map->map_type != BPF_MAP_TYPE_SOCKHASH) 5324 goto error; 5325 break; 5326 case BPF_FUNC_map_peek_elem: 5327 case BPF_FUNC_map_pop_elem: 5328 case BPF_FUNC_map_push_elem: 5329 if (map->map_type != BPF_MAP_TYPE_QUEUE && 5330 map->map_type != BPF_MAP_TYPE_STACK) 5331 goto error; 5332 break; 5333 case BPF_FUNC_sk_storage_get: 5334 case BPF_FUNC_sk_storage_delete: 5335 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 5336 goto error; 5337 break; 5338 case BPF_FUNC_inode_storage_get: 5339 case BPF_FUNC_inode_storage_delete: 5340 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 5341 goto error; 5342 break; 5343 case BPF_FUNC_task_storage_get: 5344 case BPF_FUNC_task_storage_delete: 5345 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 5346 goto error; 5347 break; 5348 default: 5349 break; 5350 } 5351 5352 return 0; 5353 error: 5354 verbose(env, "cannot pass map_type %d into func %s#%d\n", 5355 map->map_type, func_id_name(func_id), func_id); 5356 return -EINVAL; 5357 } 5358 5359 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 5360 { 5361 int count = 0; 5362 5363 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 5364 count++; 5365 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 5366 count++; 5367 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 5368 count++; 5369 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 5370 count++; 5371 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 5372 count++; 5373 5374 /* We only support one arg being in raw mode at the moment, 5375 * which is sufficient for the helper functions we have 5376 * right now. 
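 * (For instance, bpf_probe_read_kernel(dst, size, unsafe_ptr) declares exactly one ARG_PTR_TO_UNINIT_MEM argument, 'dst'.)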
5377 */ 5378 return count <= 1; 5379 } 5380 5381 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 5382 enum bpf_arg_type arg_next) 5383 { 5384 return (arg_type_is_mem_ptr(arg_curr) && 5385 !arg_type_is_mem_size(arg_next)) || 5386 (!arg_type_is_mem_ptr(arg_curr) && 5387 arg_type_is_mem_size(arg_next)); 5388 } 5389 5390 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 5391 { 5392 /* bpf_xxx(..., buf, len) call will access 'len' 5393 * bytes from memory 'buf'. Both arg types need 5394 * to be paired, so make sure there's no buggy 5395 * helper function specification. 5396 */ 5397 if (arg_type_is_mem_size(fn->arg1_type) || 5398 arg_type_is_mem_ptr(fn->arg5_type) || 5399 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 5400 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 5401 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 5402 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 5403 return false; 5404 5405 return true; 5406 } 5407 5408 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) 5409 { 5410 int count = 0; 5411 5412 if (arg_type_may_be_refcounted(fn->arg1_type)) 5413 count++; 5414 if (arg_type_may_be_refcounted(fn->arg2_type)) 5415 count++; 5416 if (arg_type_may_be_refcounted(fn->arg3_type)) 5417 count++; 5418 if (arg_type_may_be_refcounted(fn->arg4_type)) 5419 count++; 5420 if (arg_type_may_be_refcounted(fn->arg5_type)) 5421 count++; 5422 5423 /* A reference acquiring function cannot acquire 5424 * another refcounted ptr. 5425 */ 5426 if (may_be_acquire_function(func_id) && count) 5427 return false; 5428 5429 /* We only support one arg being unreferenced at the moment, 5430 * which is sufficient for the helper functions we have right now. 5431 */ 5432 return count <= 1; 5433 } 5434 5435 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 5436 { 5437 int i; 5438 5439 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 5440 if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) 5441 return false; 5442 5443 if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i]) 5444 return false; 5445 } 5446 5447 return true; 5448 } 5449 5450 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 5451 { 5452 return check_raw_mode_ok(fn) && 5453 check_arg_pair_ok(fn) && 5454 check_btf_id_ok(fn) && 5455 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; 5456 } 5457 5458 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 5459 * are now invalid, so turn them into unknown SCALAR_VALUE. 
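 * E.g. after bpf_skb_pull_data() or bpf_skb_adjust_room() the underlying buffer may have been reallocated, so packet pointers derived before the call must be re-derived from the ctx.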
5460 */ 5461 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 5462 struct bpf_func_state *state) 5463 { 5464 struct bpf_reg_state *regs = state->regs, *reg; 5465 int i; 5466 5467 for (i = 0; i < MAX_BPF_REG; i++) 5468 if (reg_is_pkt_pointer_any(®s[i])) 5469 mark_reg_unknown(env, regs, i); 5470 5471 bpf_for_each_spilled_reg(i, state, reg) { 5472 if (!reg) 5473 continue; 5474 if (reg_is_pkt_pointer_any(reg)) 5475 __mark_reg_unknown(env, reg); 5476 } 5477 } 5478 5479 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 5480 { 5481 struct bpf_verifier_state *vstate = env->cur_state; 5482 int i; 5483 5484 for (i = 0; i <= vstate->curframe; i++) 5485 __clear_all_pkt_pointers(env, vstate->frame[i]); 5486 } 5487 5488 enum { 5489 AT_PKT_END = -1, 5490 BEYOND_PKT_END = -2, 5491 }; 5492 5493 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 5494 { 5495 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5496 struct bpf_reg_state *reg = &state->regs[regn]; 5497 5498 if (reg->type != PTR_TO_PACKET) 5499 /* PTR_TO_PACKET_META is not supported yet */ 5500 return; 5501 5502 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 5503 * How far beyond pkt_end it goes is unknown. 5504 * if (!range_open) it's the case of pkt >= pkt_end 5505 * if (range_open) it's the case of pkt > pkt_end 5506 * hence this pointer is at least 1 byte bigger than pkt_end 5507 */ 5508 if (range_open) 5509 reg->range = BEYOND_PKT_END; 5510 else 5511 reg->range = AT_PKT_END; 5512 } 5513 5514 static void release_reg_references(struct bpf_verifier_env *env, 5515 struct bpf_func_state *state, 5516 int ref_obj_id) 5517 { 5518 struct bpf_reg_state *regs = state->regs, *reg; 5519 int i; 5520 5521 for (i = 0; i < MAX_BPF_REG; i++) 5522 if (regs[i].ref_obj_id == ref_obj_id) 5523 mark_reg_unknown(env, regs, i); 5524 5525 bpf_for_each_spilled_reg(i, state, reg) { 5526 if (!reg) 5527 continue; 5528 if (reg->ref_obj_id == ref_obj_id) 5529 __mark_reg_unknown(env, reg); 5530 } 5531 } 5532 5533 /* The pointer with the specified id has released its reference to kernel 5534 * resources. Identify all copies of the same pointer and clear the reference. 
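 * Sketch: after sk = bpf_sk_lookup_tcp(...), a copy such as sk2 = sk shares sk's ref_obj_id, so bpf_sk_release(sk) marks both sk and sk2 as unknown scalars.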
5535 */ 5536 static int release_reference(struct bpf_verifier_env *env, 5537 int ref_obj_id) 5538 { 5539 struct bpf_verifier_state *vstate = env->cur_state; 5540 int err; 5541 int i; 5542 5543 err = release_reference_state(cur_func(env), ref_obj_id); 5544 if (err) 5545 return err; 5546 5547 for (i = 0; i <= vstate->curframe; i++) 5548 release_reg_references(env, vstate->frame[i], ref_obj_id); 5549 5550 return 0; 5551 } 5552 5553 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 5554 struct bpf_reg_state *regs) 5555 { 5556 int i; 5557 5558 /* after the call registers r0 - r5 were scratched */ 5559 for (i = 0; i < CALLER_SAVED_REGS; i++) { 5560 mark_reg_not_init(env, regs, caller_saved[i]); 5561 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 5562 } 5563 } 5564 5565 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, 5566 struct bpf_func_state *caller, 5567 struct bpf_func_state *callee, 5568 int insn_idx); 5569 5570 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 5571 int *insn_idx, int subprog, 5572 set_callee_state_fn set_callee_state_cb) 5573 { 5574 struct bpf_verifier_state *state = env->cur_state; 5575 struct bpf_func_info_aux *func_info_aux; 5576 struct bpf_func_state *caller, *callee; 5577 int err; 5578 bool is_global = false; 5579 5580 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 5581 verbose(env, "the call stack of %d frames is too deep\n", 5582 state->curframe + 2); 5583 return -E2BIG; 5584 } 5585 5586 caller = state->frame[state->curframe]; 5587 if (state->frame[state->curframe + 1]) { 5588 verbose(env, "verifier bug. Frame %d already allocated\n", 5589 state->curframe + 1); 5590 return -EFAULT; 5591 } 5592 5593 func_info_aux = env->prog->aux->func_info_aux; 5594 if (func_info_aux) 5595 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 5596 err = btf_check_subprog_arg_match(env, subprog, caller->regs); 5597 if (err == -EFAULT) 5598 return err; 5599 if (is_global) { 5600 if (err) { 5601 verbose(env, "Caller passes invalid args into func#%d\n", 5602 subprog); 5603 return err; 5604 } else { 5605 if (env->log.level & BPF_LOG_LEVEL) 5606 verbose(env, 5607 "Func#%d is global and valid. Skipping.\n", 5608 subprog); 5609 clear_caller_saved_regs(env, caller->regs); 5610 5611 /* All global functions return a 64-bit SCALAR_VALUE */ 5612 mark_reg_unknown(env, caller->regs, BPF_REG_0); 5613 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 5614 5615 /* continue with next insn after call */ 5616 return 0; 5617 } 5618 } 5619 5620 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 5621 if (!callee) 5622 return -ENOMEM; 5623 state->frame[state->curframe + 1] = callee; 5624 5625 /* callee cannot access r0, r6 - r9 for reading and has to write 5626 * into its own stack before reading from it. 
5627 * callee can read/write into caller's stack 5628 */ 5629 init_func_state(env, callee, 5630 /* remember the callsite, it will be used by bpf_exit */ 5631 *insn_idx /* callsite */, 5632 state->curframe + 1 /* frameno within this callchain */, 5633 subprog /* subprog number within this prog */); 5634 5635 /* Transfer references to the callee */ 5636 err = copy_reference_state(callee, caller); 5637 if (err) 5638 return err; 5639 5640 err = set_callee_state_cb(env, caller, callee, *insn_idx); 5641 if (err) 5642 return err; 5643 5644 clear_caller_saved_regs(env, caller->regs); 5645 5646 /* only increment it after check_reg_arg() finished */ 5647 state->curframe++; 5648 5649 /* and go analyze first insn of the callee */ 5650 *insn_idx = env->subprog_info[subprog].start - 1; 5651 5652 if (env->log.level & BPF_LOG_LEVEL) { 5653 verbose(env, "caller:\n"); 5654 print_verifier_state(env, caller); 5655 verbose(env, "callee:\n"); 5656 print_verifier_state(env, callee); 5657 } 5658 return 0; 5659 } 5660 5661 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 5662 struct bpf_func_state *caller, 5663 struct bpf_func_state *callee) 5664 { 5665 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, 5666 * void *callback_ctx, u64 flags); 5667 * callback_fn(struct bpf_map *map, void *key, void *value, 5668 * void *callback_ctx); 5669 */ 5670 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 5671 5672 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 5673 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 5674 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; 5675 5676 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 5677 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 5678 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; 5679 5680 /* pointer to stack or null */ 5681 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; 5682 5683 /* unused */ 5684 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 5685 return 0; 5686 } 5687 5688 static int set_callee_state(struct bpf_verifier_env *env, 5689 struct bpf_func_state *caller, 5690 struct bpf_func_state *callee, int insn_idx) 5691 { 5692 int i; 5693 5694 /* copy r1 - r5 args that callee can access. The copy includes parent 5695 * pointers, which connects us up to the liveness chain 5696 */ 5697 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 5698 callee->regs[i] = caller->regs[i]; 5699 return 0; 5700 } 5701 5702 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 5703 int *insn_idx) 5704 { 5705 int subprog, target_insn; 5706 5707 target_insn = *insn_idx + insn->imm + 1; 5708 subprog = find_subprog(env, target_insn); 5709 if (subprog < 0) { 5710 verbose(env, "verifier bug. 
No program starts at insn %d\n", 5711 target_insn); 5712 return -EFAULT; 5713 } 5714 5715 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); 5716 } 5717 5718 static int set_map_elem_callback_state(struct bpf_verifier_env *env, 5719 struct bpf_func_state *caller, 5720 struct bpf_func_state *callee, 5721 int insn_idx) 5722 { 5723 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; 5724 struct bpf_map *map; 5725 int err; 5726 5727 if (bpf_map_ptr_poisoned(insn_aux)) { 5728 verbose(env, "tail_call abusing map_ptr\n"); 5729 return -EINVAL; 5730 } 5731 5732 map = BPF_MAP_PTR(insn_aux->map_ptr_state); 5733 if (!map->ops->map_set_for_each_callback_args || 5734 !map->ops->map_for_each_callback) { 5735 verbose(env, "callback function not allowed for map\n"); 5736 return -ENOTSUPP; 5737 } 5738 5739 err = map->ops->map_set_for_each_callback_args(env, caller, callee); 5740 if (err) 5741 return err; 5742 5743 callee->in_callback_fn = true; 5744 return 0; 5745 } 5746 5747 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 5748 { 5749 struct bpf_verifier_state *state = env->cur_state; 5750 struct bpf_func_state *caller, *callee; 5751 struct bpf_reg_state *r0; 5752 int err; 5753 5754 callee = state->frame[state->curframe]; 5755 r0 = &callee->regs[BPF_REG_0]; 5756 if (r0->type == PTR_TO_STACK) { 5757 /* technically it's ok to return caller's stack pointer 5758 * (or caller's caller's pointer) back to the caller, 5759 * since these pointers are valid. Only current stack 5760 * pointer will be invalid as soon as function exits, 5761 * but let's be conservative 5762 */ 5763 verbose(env, "cannot return stack pointer to the caller\n"); 5764 return -EINVAL; 5765 } 5766 5767 state->curframe--; 5768 caller = state->frame[state->curframe]; 5769 if (callee->in_callback_fn) { 5770 /* enforce R0 return value range [0, 1]. 
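 * For bpf_for_each_map_elem() the callback's return value means
 * 0 = continue with the next element, 1 = stop the iteration; any
 * other value is rejected below.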
*/
5771 struct tnum range = tnum_range(0, 1);
5772
5773 if (r0->type != SCALAR_VALUE) {
5774 verbose(env, "R0 not a scalar value\n");
5775 return -EACCES;
5776 }
5777 if (!tnum_in(range, r0->var_off)) {
5778 verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
5779 return -EINVAL;
5780 }
5781 } else {
5782 /* return to the caller whatever r0 had in the callee */
5783 caller->regs[BPF_REG_0] = *r0;
5784 }
5785
5786 /* Transfer references to the caller */
5787 err = copy_reference_state(caller, callee);
5788 if (err)
5789 return err;
5790
5791 *insn_idx = callee->callsite + 1;
5792 if (env->log.level & BPF_LOG_LEVEL) {
5793 verbose(env, "returning from callee:\n");
5794 print_verifier_state(env, callee);
5795 verbose(env, "to caller at %d:\n", *insn_idx);
5796 print_verifier_state(env, caller);
5797 }
5798 /* clear everything in the callee */
5799 free_func_state(callee);
5800 state->frame[state->curframe + 1] = NULL;
5801 return 0;
5802 }
5803
5804 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
5805 int func_id,
5806 struct bpf_call_arg_meta *meta)
5807 {
5808 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
5809
5810 if (ret_type != RET_INTEGER ||
5811 (func_id != BPF_FUNC_get_stack &&
5812 func_id != BPF_FUNC_get_task_stack &&
5813 func_id != BPF_FUNC_probe_read_str &&
5814 func_id != BPF_FUNC_probe_read_kernel_str &&
5815 func_id != BPF_FUNC_probe_read_user_str))
5816 return;
5817
5818 ret_reg->smax_value = meta->msize_max_value;
5819 ret_reg->s32_max_value = meta->msize_max_value;
5820 ret_reg->smin_value = -MAX_ERRNO;
5821 ret_reg->s32_min_value = -MAX_ERRNO;
5822 __reg_deduce_bounds(ret_reg);
5823 __reg_bound_offset(ret_reg);
5824 __update_reg_bounds(ret_reg);
5825 }
5826
5827 static int
5828 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
5829 int func_id, int insn_idx)
5830 {
5831 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
5832 struct bpf_map *map = meta->map_ptr;
5833
5834 if (func_id != BPF_FUNC_tail_call &&
5835 func_id != BPF_FUNC_map_lookup_elem &&
5836 func_id != BPF_FUNC_map_update_elem &&
5837 func_id != BPF_FUNC_map_delete_elem &&
5838 func_id != BPF_FUNC_map_push_elem &&
5839 func_id != BPF_FUNC_map_pop_elem &&
5840 func_id != BPF_FUNC_map_peek_elem &&
5841 func_id != BPF_FUNC_for_each_map_elem &&
5842 func_id != BPF_FUNC_redirect_map)
5843 return 0;
5844
5845 if (map == NULL) {
5846 verbose(env, "kernel subsystem misconfigured verifier\n");
5847 return -EINVAL;
5848 }
5849
5850 /* In case of read-only, some additional restrictions
5851 * need to be applied in order to prevent altering the
5852 * state of the map from program side.
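 *
 * E.g. for a map created with BPF_F_RDONLY_PROG (a sketch):
 *   bpf_map_lookup_elem(&map, &key)           // still allowed
 *   bpf_map_update_elem(&map, &key, &val, 0)  // rejected below, -EACCES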
5853 */
5854 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
5855 (func_id == BPF_FUNC_map_delete_elem ||
5856 func_id == BPF_FUNC_map_update_elem ||
5857 func_id == BPF_FUNC_map_push_elem ||
5858 func_id == BPF_FUNC_map_pop_elem)) {
5859 verbose(env, "write into map forbidden\n");
5860 return -EACCES;
5861 }
5862
5863 if (!BPF_MAP_PTR(aux->map_ptr_state))
5864 bpf_map_ptr_store(aux, meta->map_ptr,
5865 !meta->map_ptr->bypass_spec_v1);
5866 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
5867 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
5868 !meta->map_ptr->bypass_spec_v1);
5869 return 0;
5870 }
5871
5872 static int
5873 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
5874 int func_id, int insn_idx)
5875 {
5876 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
5877 struct bpf_reg_state *regs = cur_regs(env), *reg;
5878 struct bpf_map *map = meta->map_ptr;
5879 struct tnum range;
5880 u64 val;
5881 int err;
5882
5883 if (func_id != BPF_FUNC_tail_call)
5884 return 0;
5885 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
5886 verbose(env, "kernel subsystem misconfigured verifier\n");
5887 return -EINVAL;
5888 }
5889
5890 range = tnum_range(0, map->max_entries - 1);
5891 reg = &regs[BPF_REG_3];
5892
5893 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
5894 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
5895 return 0;
5896 }
5897
5898 err = mark_chain_precision(env, BPF_REG_3);
5899 if (err)
5900 return err;
5901
5902 val = reg->var_off.value;
5903 if (bpf_map_key_unseen(aux))
5904 bpf_map_key_store(aux, val);
5905 else if (!bpf_map_key_poisoned(aux) &&
5906 bpf_map_key_immediate(aux) != val)
5907 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
5908 return 0;
5909 }
5910
5911 static int check_reference_leak(struct bpf_verifier_env *env)
5912 {
5913 struct bpf_func_state *state = cur_func(env);
5914 int i;
5915
5916 for (i = 0; i < state->acquired_refs; i++) {
5917 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
5918 state->refs[i].id, state->refs[i].insn_idx);
5919 }
5920 return state->acquired_refs ? -EINVAL : 0;
5921 }
5922
5923 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
5924 struct bpf_reg_state *regs)
5925 {
5926 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
5927 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
5928 struct bpf_map *fmt_map = fmt_reg->map_ptr;
5929 int err, fmt_map_off, num_args;
5930 u64 fmt_addr;
5931 char *fmt;
5932
5933 /* data must be an array of u64 */
5934 if (data_len_reg->var_off.value % 8)
5935 return -EINVAL;
5936 num_args = data_len_reg->var_off.value / 8;
5937
5938 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
5939 * and map_direct_value_addr is set.
5940 */
5941 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
5942 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
5943 fmt_map_off);
5944 if (err) {
5945 verbose(env, "verifier bug\n");
5946 return -EFAULT;
5947 }
5948 fmt = (char *)(long)fmt_addr + fmt_map_off;
5949
5950 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
5951 * can focus on validating the format specifiers.
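 *
 * From the program side the contract looks like this (a sketch, names
 * are illustrative):
 *   static const char fmt[] = "pid %d comm %s";  // read-only map with
 *                                                // direct value access
 *   u64 args[2] = { pid, (unsigned long)comm };
 *   bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 * The data_len argument (sizeof(args) here) must be a constant multiple
 * of 8, and num_args above is data_len / 8.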
5952 */ 5953 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args); 5954 if (err < 0) 5955 verbose(env, "Invalid format string\n"); 5956 5957 return err; 5958 } 5959 5960 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 5961 int *insn_idx_p) 5962 { 5963 const struct bpf_func_proto *fn = NULL; 5964 struct bpf_reg_state *regs; 5965 struct bpf_call_arg_meta meta; 5966 int insn_idx = *insn_idx_p; 5967 bool changes_data; 5968 int i, err, func_id; 5969 5970 /* find function prototype */ 5971 func_id = insn->imm; 5972 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 5973 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 5974 func_id); 5975 return -EINVAL; 5976 } 5977 5978 if (env->ops->get_func_proto) 5979 fn = env->ops->get_func_proto(func_id, env->prog); 5980 if (!fn) { 5981 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 5982 func_id); 5983 return -EINVAL; 5984 } 5985 5986 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 5987 if (!env->prog->gpl_compatible && fn->gpl_only) { 5988 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 5989 return -EINVAL; 5990 } 5991 5992 if (fn->allowed && !fn->allowed(env->prog)) { 5993 verbose(env, "helper call is not allowed in probe\n"); 5994 return -EINVAL; 5995 } 5996 5997 /* With LD_ABS/IND some JITs save/restore skb from r1. */ 5998 changes_data = bpf_helper_changes_pkt_data(fn->func); 5999 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 6000 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 6001 func_id_name(func_id), func_id); 6002 return -EINVAL; 6003 } 6004 6005 memset(&meta, 0, sizeof(meta)); 6006 meta.pkt_access = fn->pkt_access; 6007 6008 err = check_func_proto(fn, func_id); 6009 if (err) { 6010 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 6011 func_id_name(func_id), func_id); 6012 return err; 6013 } 6014 6015 meta.func_id = func_id; 6016 /* check args */ 6017 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { 6018 err = check_func_arg(env, i, &meta, fn); 6019 if (err) 6020 return err; 6021 } 6022 6023 err = record_func_map(env, &meta, func_id, insn_idx); 6024 if (err) 6025 return err; 6026 6027 err = record_func_key(env, &meta, func_id, insn_idx); 6028 if (err) 6029 return err; 6030 6031 /* Mark slots with STACK_MISC in case of raw mode, stack offset 6032 * is inferred from register state. 6033 */ 6034 for (i = 0; i < meta.access_size; i++) { 6035 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 6036 BPF_WRITE, -1, false); 6037 if (err) 6038 return err; 6039 } 6040 6041 if (func_id == BPF_FUNC_tail_call) { 6042 err = check_reference_leak(env); 6043 if (err) { 6044 verbose(env, "tail_call would lead to reference leak\n"); 6045 return err; 6046 } 6047 } else if (is_release_function(func_id)) { 6048 err = release_reference(env, meta.ref_obj_id); 6049 if (err) { 6050 verbose(env, "func %s#%d reference has not been acquired before\n", 6051 func_id_name(func_id), func_id); 6052 return err; 6053 } 6054 } 6055 6056 regs = cur_regs(env); 6057 6058 /* check that flags argument in get_local_storage(map, flags) is 0, 6059 * this is required because get_local_storage() can't return an error. 
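 * I.e. bpf_get_local_storage(&map, 0) is accepted, while any flags
 * value in r2 that is not a known constant zero is rejected below.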
6060 */
6061 if (func_id == BPF_FUNC_get_local_storage &&
6062 !register_is_null(&regs[BPF_REG_2])) {
6063 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
6064 return -EINVAL;
6065 }
6066
6067 if (func_id == BPF_FUNC_for_each_map_elem) {
6068 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6069 set_map_elem_callback_state);
6070 if (err < 0)
6071 return -EINVAL;
6072 }
6073
6074 if (func_id == BPF_FUNC_snprintf) {
6075 err = check_bpf_snprintf_call(env, regs);
6076 if (err < 0)
6077 return err;
6078 }
6079
6080 /* reset caller saved regs */
6081 for (i = 0; i < CALLER_SAVED_REGS; i++) {
6082 mark_reg_not_init(env, regs, caller_saved[i]);
6083 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6084 }
6085
6086 /* helper call returns 64-bit value. */
6087 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6088
6089 /* update return register (already marked as written above) */
6090 if (fn->ret_type == RET_INTEGER) {
6091 /* sets type to SCALAR_VALUE */
6092 mark_reg_unknown(env, regs, BPF_REG_0);
6093 } else if (fn->ret_type == RET_VOID) {
6094 regs[BPF_REG_0].type = NOT_INIT;
6095 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
6096 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
6097 /* There is no offset yet applied, variable or fixed */
6098 mark_reg_known_zero(env, regs, BPF_REG_0);
6099 /* remember map_ptr, so that check_map_access()
6100 * can check 'value_size' boundary of memory access
6101 * to map element returned from bpf_map_lookup_elem()
6102 */
6103 if (meta.map_ptr == NULL) {
6104 verbose(env,
6105 "kernel subsystem misconfigured verifier\n");
6106 return -EINVAL;
6107 }
6108 regs[BPF_REG_0].map_ptr = meta.map_ptr;
6109 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
6110 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
6111 if (map_value_has_spin_lock(meta.map_ptr))
6112 regs[BPF_REG_0].id = ++env->id_gen;
6113 } else {
6114 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
6115 }
6116 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
6117 mark_reg_known_zero(env, regs, BPF_REG_0);
6118 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
6119 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
6120 mark_reg_known_zero(env, regs, BPF_REG_0);
6121 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
6122 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
6123 mark_reg_known_zero(env, regs, BPF_REG_0);
6124 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
6125 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
6126 mark_reg_known_zero(env, regs, BPF_REG_0);
6127 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
6128 regs[BPF_REG_0].mem_size = meta.mem_size;
6129 } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
6130 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
6131 const struct btf_type *t;
6132
6133 mark_reg_known_zero(env, regs, BPF_REG_0);
6134 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
6135 if (!btf_type_is_struct(t)) {
6136 u32 tsize;
6137 const struct btf_type *ret;
6138 const char *tname;
6139
6140 /* resolve the type size of ksym. */
6141 ret = btf_resolve_size(meta.ret_btf, t, &tsize);
6142 if (IS_ERR(ret)) {
6143 tname = btf_name_by_offset(meta.ret_btf, t->name_off);
6144 verbose(env, "unable to resolve the size of type '%s': %ld\n",
6145 tname, PTR_ERR(ret));
6146 return -EINVAL;
6147 }
6148 regs[BPF_REG_0].type =
6149 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
6150 PTR_TO_MEM : PTR_TO_MEM_OR_NULL; 6151 regs[BPF_REG_0].mem_size = tsize; 6152 } else { 6153 regs[BPF_REG_0].type = 6154 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? 6155 PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL; 6156 regs[BPF_REG_0].btf = meta.ret_btf; 6157 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 6158 } 6159 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL || 6160 fn->ret_type == RET_PTR_TO_BTF_ID) { 6161 int ret_btf_id; 6162 6163 mark_reg_known_zero(env, regs, BPF_REG_0); 6164 regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ? 6165 PTR_TO_BTF_ID : 6166 PTR_TO_BTF_ID_OR_NULL; 6167 ret_btf_id = *fn->ret_btf_id; 6168 if (ret_btf_id == 0) { 6169 verbose(env, "invalid return type %d of func %s#%d\n", 6170 fn->ret_type, func_id_name(func_id), func_id); 6171 return -EINVAL; 6172 } 6173 /* current BPF helper definitions are only coming from 6174 * built-in code with type IDs from vmlinux BTF 6175 */ 6176 regs[BPF_REG_0].btf = btf_vmlinux; 6177 regs[BPF_REG_0].btf_id = ret_btf_id; 6178 } else { 6179 verbose(env, "unknown return type %d of func %s#%d\n", 6180 fn->ret_type, func_id_name(func_id), func_id); 6181 return -EINVAL; 6182 } 6183 6184 if (reg_type_may_be_null(regs[BPF_REG_0].type)) 6185 regs[BPF_REG_0].id = ++env->id_gen; 6186 6187 if (is_ptr_cast_function(func_id)) { 6188 /* For release_reference() */ 6189 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 6190 } else if (is_acquire_function(func_id, meta.map_ptr)) { 6191 int id = acquire_reference_state(env, insn_idx); 6192 6193 if (id < 0) 6194 return id; 6195 /* For mark_ptr_or_null_reg() */ 6196 regs[BPF_REG_0].id = id; 6197 /* For release_reference() */ 6198 regs[BPF_REG_0].ref_obj_id = id; 6199 } 6200 6201 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 6202 6203 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 6204 if (err) 6205 return err; 6206 6207 if ((func_id == BPF_FUNC_get_stack || 6208 func_id == BPF_FUNC_get_task_stack) && 6209 !env->prog->has_callchain_buf) { 6210 const char *err_str; 6211 6212 #ifdef CONFIG_PERF_EVENTS 6213 err = get_callchain_buffers(sysctl_perf_event_max_stack); 6214 err_str = "cannot get callchain buffer for func %s#%d\n"; 6215 #else 6216 err = -ENOTSUPP; 6217 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 6218 #endif 6219 if (err) { 6220 verbose(env, err_str, func_id_name(func_id), func_id); 6221 return err; 6222 } 6223 6224 env->prog->has_callchain_buf = true; 6225 } 6226 6227 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 6228 env->prog->call_get_stack = true; 6229 6230 if (changes_data) 6231 clear_all_pkt_pointers(env); 6232 return 0; 6233 } 6234 6235 /* mark_btf_func_reg_size() is used when the reg size is determined by 6236 * the BTF func_proto's return value size and argument. 6237 */ 6238 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, 6239 size_t reg_size) 6240 { 6241 struct bpf_reg_state *reg = &cur_regs(env)[regno]; 6242 6243 if (regno == BPF_REG_0) { 6244 /* Function return value */ 6245 reg->live |= REG_LIVE_WRITTEN; 6246 reg->subreg_def = reg_size == sizeof(u64) ? 
6247 DEF_NOT_SUBREG : env->insn_idx + 1; 6248 } else { 6249 /* Function argument */ 6250 if (reg_size == sizeof(u64)) { 6251 mark_insn_zext(env, reg); 6252 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 6253 } else { 6254 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); 6255 } 6256 } 6257 } 6258 6259 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) 6260 { 6261 const struct btf_type *t, *func, *func_proto, *ptr_type; 6262 struct bpf_reg_state *regs = cur_regs(env); 6263 const char *func_name, *ptr_type_name; 6264 u32 i, nargs, func_id, ptr_type_id; 6265 const struct btf_param *args; 6266 int err; 6267 6268 func_id = insn->imm; 6269 func = btf_type_by_id(btf_vmlinux, func_id); 6270 func_name = btf_name_by_offset(btf_vmlinux, func->name_off); 6271 func_proto = btf_type_by_id(btf_vmlinux, func->type); 6272 6273 if (!env->ops->check_kfunc_call || 6274 !env->ops->check_kfunc_call(func_id)) { 6275 verbose(env, "calling kernel function %s is not allowed\n", 6276 func_name); 6277 return -EACCES; 6278 } 6279 6280 /* Check the arguments */ 6281 err = btf_check_kfunc_arg_match(env, btf_vmlinux, func_id, regs); 6282 if (err) 6283 return err; 6284 6285 for (i = 0; i < CALLER_SAVED_REGS; i++) 6286 mark_reg_not_init(env, regs, caller_saved[i]); 6287 6288 /* Check return type */ 6289 t = btf_type_skip_modifiers(btf_vmlinux, func_proto->type, NULL); 6290 if (btf_type_is_scalar(t)) { 6291 mark_reg_unknown(env, regs, BPF_REG_0); 6292 mark_btf_func_reg_size(env, BPF_REG_0, t->size); 6293 } else if (btf_type_is_ptr(t)) { 6294 ptr_type = btf_type_skip_modifiers(btf_vmlinux, t->type, 6295 &ptr_type_id); 6296 if (!btf_type_is_struct(ptr_type)) { 6297 ptr_type_name = btf_name_by_offset(btf_vmlinux, 6298 ptr_type->name_off); 6299 verbose(env, "kernel function %s returns pointer type %s %s is not supported\n", 6300 func_name, btf_type_str(ptr_type), 6301 ptr_type_name); 6302 return -EINVAL; 6303 } 6304 mark_reg_known_zero(env, regs, BPF_REG_0); 6305 regs[BPF_REG_0].btf = btf_vmlinux; 6306 regs[BPF_REG_0].type = PTR_TO_BTF_ID; 6307 regs[BPF_REG_0].btf_id = ptr_type_id; 6308 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); 6309 } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */ 6310 6311 nargs = btf_type_vlen(func_proto); 6312 args = (const struct btf_param *)(func_proto + 1); 6313 for (i = 0; i < nargs; i++) { 6314 u32 regno = i + 1; 6315 6316 t = btf_type_skip_modifiers(btf_vmlinux, args[i].type, NULL); 6317 if (btf_type_is_ptr(t)) 6318 mark_btf_func_reg_size(env, regno, sizeof(void *)); 6319 else 6320 /* scalar. 
ensured by btf_check_kfunc_arg_match() */ 6321 mark_btf_func_reg_size(env, regno, t->size); 6322 } 6323 6324 return 0; 6325 } 6326 6327 static bool signed_add_overflows(s64 a, s64 b) 6328 { 6329 /* Do the add in u64, where overflow is well-defined */ 6330 s64 res = (s64)((u64)a + (u64)b); 6331 6332 if (b < 0) 6333 return res > a; 6334 return res < a; 6335 } 6336 6337 static bool signed_add32_overflows(s32 a, s32 b) 6338 { 6339 /* Do the add in u32, where overflow is well-defined */ 6340 s32 res = (s32)((u32)a + (u32)b); 6341 6342 if (b < 0) 6343 return res > a; 6344 return res < a; 6345 } 6346 6347 static bool signed_sub_overflows(s64 a, s64 b) 6348 { 6349 /* Do the sub in u64, where overflow is well-defined */ 6350 s64 res = (s64)((u64)a - (u64)b); 6351 6352 if (b < 0) 6353 return res < a; 6354 return res > a; 6355 } 6356 6357 static bool signed_sub32_overflows(s32 a, s32 b) 6358 { 6359 /* Do the sub in u32, where overflow is well-defined */ 6360 s32 res = (s32)((u32)a - (u32)b); 6361 6362 if (b < 0) 6363 return res < a; 6364 return res > a; 6365 } 6366 6367 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 6368 const struct bpf_reg_state *reg, 6369 enum bpf_reg_type type) 6370 { 6371 bool known = tnum_is_const(reg->var_off); 6372 s64 val = reg->var_off.value; 6373 s64 smin = reg->smin_value; 6374 6375 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 6376 verbose(env, "math between %s pointer and %lld is not allowed\n", 6377 reg_type_str[type], val); 6378 return false; 6379 } 6380 6381 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 6382 verbose(env, "%s pointer offset %d is not allowed\n", 6383 reg_type_str[type], reg->off); 6384 return false; 6385 } 6386 6387 if (smin == S64_MIN) { 6388 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 6389 reg_type_str[type]); 6390 return false; 6391 } 6392 6393 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 6394 verbose(env, "value %lld makes %s pointer be out of bounds\n", 6395 smin, reg_type_str[type]); 6396 return false; 6397 } 6398 6399 return true; 6400 } 6401 6402 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 6403 { 6404 return &env->insn_aux_data[env->insn_idx]; 6405 } 6406 6407 enum { 6408 REASON_BOUNDS = -1, 6409 REASON_TYPE = -2, 6410 REASON_PATHS = -3, 6411 REASON_LIMIT = -4, 6412 REASON_STACK = -5, 6413 }; 6414 6415 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 6416 u32 *alu_limit, bool mask_to_left) 6417 { 6418 u32 max = 0, ptr_limit = 0; 6419 6420 switch (ptr_reg->type) { 6421 case PTR_TO_STACK: 6422 /* Offset 0 is out-of-bounds, but acceptable start for the 6423 * left direction, see BPF_REG_FP. Also, unknown scalar 6424 * offset where we would need to deal with min/max bounds is 6425 * currently prohibited for unprivileged. 6426 */ 6427 max = MAX_BPF_STACK + mask_to_left; 6428 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); 6429 break; 6430 case PTR_TO_MAP_VALUE: 6431 max = ptr_reg->map_ptr->value_size; 6432 ptr_limit = (mask_to_left ? 
6433 ptr_reg->smin_value : 6434 ptr_reg->umax_value) + ptr_reg->off; 6435 break; 6436 default: 6437 return REASON_TYPE; 6438 } 6439 6440 if (ptr_limit >= max) 6441 return REASON_LIMIT; 6442 *alu_limit = ptr_limit; 6443 return 0; 6444 } 6445 6446 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 6447 const struct bpf_insn *insn) 6448 { 6449 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 6450 } 6451 6452 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 6453 u32 alu_state, u32 alu_limit) 6454 { 6455 /* If we arrived here from different branches with different 6456 * state or limits to sanitize, then this won't work. 6457 */ 6458 if (aux->alu_state && 6459 (aux->alu_state != alu_state || 6460 aux->alu_limit != alu_limit)) 6461 return REASON_PATHS; 6462 6463 /* Corresponding fixup done in do_misc_fixups(). */ 6464 aux->alu_state = alu_state; 6465 aux->alu_limit = alu_limit; 6466 return 0; 6467 } 6468 6469 static int sanitize_val_alu(struct bpf_verifier_env *env, 6470 struct bpf_insn *insn) 6471 { 6472 struct bpf_insn_aux_data *aux = cur_aux(env); 6473 6474 if (can_skip_alu_sanitation(env, insn)) 6475 return 0; 6476 6477 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 6478 } 6479 6480 static bool sanitize_needed(u8 opcode) 6481 { 6482 return opcode == BPF_ADD || opcode == BPF_SUB; 6483 } 6484 6485 struct bpf_sanitize_info { 6486 struct bpf_insn_aux_data aux; 6487 bool mask_to_left; 6488 }; 6489 6490 static struct bpf_verifier_state * 6491 sanitize_speculative_path(struct bpf_verifier_env *env, 6492 const struct bpf_insn *insn, 6493 u32 next_idx, u32 curr_idx) 6494 { 6495 struct bpf_verifier_state *branch; 6496 struct bpf_reg_state *regs; 6497 6498 branch = push_stack(env, next_idx, curr_idx, true); 6499 if (branch && insn) { 6500 regs = branch->frame[branch->curframe]->regs; 6501 if (BPF_SRC(insn->code) == BPF_K) { 6502 mark_reg_unknown(env, regs, insn->dst_reg); 6503 } else if (BPF_SRC(insn->code) == BPF_X) { 6504 mark_reg_unknown(env, regs, insn->dst_reg); 6505 mark_reg_unknown(env, regs, insn->src_reg); 6506 } 6507 } 6508 return branch; 6509 } 6510 6511 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 6512 struct bpf_insn *insn, 6513 const struct bpf_reg_state *ptr_reg, 6514 const struct bpf_reg_state *off_reg, 6515 struct bpf_reg_state *dst_reg, 6516 struct bpf_sanitize_info *info, 6517 const bool commit_window) 6518 { 6519 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; 6520 struct bpf_verifier_state *vstate = env->cur_state; 6521 bool off_is_imm = tnum_is_const(off_reg->var_off); 6522 bool off_is_neg = off_reg->smin_value < 0; 6523 bool ptr_is_dst_reg = ptr_reg == dst_reg; 6524 u8 opcode = BPF_OP(insn->code); 6525 u32 alu_state, alu_limit; 6526 struct bpf_reg_state tmp; 6527 bool ret; 6528 int err; 6529 6530 if (can_skip_alu_sanitation(env, insn)) 6531 return 0; 6532 6533 /* We already marked aux for masking from non-speculative 6534 * paths, thus we got here in the first place. We only care 6535 * to explore bad access from here. 
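 *
 * For context, the masking that do_misc_fixups() later patches in
 * around such an ALU insn is conceptually (a sketch, ignoring the
 * negative-offset handling):
 *   ax = alu_limit - off
 *   ax |= off        // sign bit set iff off is outside [0, alu_limit]
 *   ax = -ax
 *   ax s>>= 63       // all-ones when off is in range, zero otherwise
 *   off &= ax        // an out-of-range off is forced to 0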
6536 */ 6537 if (vstate->speculative) 6538 goto do_sim; 6539 6540 if (!commit_window) { 6541 if (!tnum_is_const(off_reg->var_off) && 6542 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) 6543 return REASON_BOUNDS; 6544 6545 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || 6546 (opcode == BPF_SUB && !off_is_neg); 6547 } 6548 6549 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); 6550 if (err < 0) 6551 return err; 6552 6553 if (commit_window) { 6554 /* In commit phase we narrow the masking window based on 6555 * the observed pointer move after the simulated operation. 6556 */ 6557 alu_state = info->aux.alu_state; 6558 alu_limit = abs(info->aux.alu_limit - alu_limit); 6559 } else { 6560 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 6561 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; 6562 alu_state |= ptr_is_dst_reg ? 6563 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 6564 } 6565 6566 err = update_alu_sanitation_state(aux, alu_state, alu_limit); 6567 if (err < 0) 6568 return err; 6569 do_sim: 6570 /* If we're in commit phase, we're done here given we already 6571 * pushed the truncated dst_reg into the speculative verification 6572 * stack. 6573 * 6574 * Also, when register is a known constant, we rewrite register-based 6575 * operation to immediate-based, and thus do not need masking (and as 6576 * a consequence, do not need to simulate the zero-truncation either). 6577 */ 6578 if (commit_window || off_is_imm) 6579 return 0; 6580 6581 /* Simulate and find potential out-of-bounds access under 6582 * speculative execution from truncation as a result of 6583 * masking when off was not within expected range. If off 6584 * sits in dst, then we temporarily need to move ptr there 6585 * to simulate dst (== 0) +/-= ptr. Needed, for example, 6586 * for cases where we use K-based arithmetic in one direction 6587 * and truncated reg-based in the other in order to explore 6588 * bad access. 6589 */ 6590 if (!ptr_is_dst_reg) { 6591 tmp = *dst_reg; 6592 *dst_reg = *ptr_reg; 6593 } 6594 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, 6595 env->insn_idx); 6596 if (!ptr_is_dst_reg && ret) 6597 *dst_reg = tmp; 6598 return !ret ? REASON_STACK : 0; 6599 } 6600 6601 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) 6602 { 6603 struct bpf_verifier_state *vstate = env->cur_state; 6604 6605 /* If we simulate paths under speculation, we don't update the 6606 * insn as 'seen' such that when we verify unreachable paths in 6607 * the non-speculative domain, sanitize_dead_code() can still 6608 * rewrite/sanitize them. 6609 */ 6610 if (!vstate->speculative) 6611 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 6612 } 6613 6614 static int sanitize_err(struct bpf_verifier_env *env, 6615 const struct bpf_insn *insn, int reason, 6616 const struct bpf_reg_state *off_reg, 6617 const struct bpf_reg_state *dst_reg) 6618 { 6619 static const char *err = "pointer arithmetic with it prohibited for !root"; 6620 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; 6621 u32 dst = insn->dst_reg, src = insn->src_reg; 6622 6623 switch (reason) { 6624 case REASON_BOUNDS: 6625 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", 6626 off_reg == dst_reg ? dst : src, err); 6627 break; 6628 case REASON_TYPE: 6629 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", 6630 off_reg == dst_reg ? 
src : dst, err); 6631 break; 6632 case REASON_PATHS: 6633 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", 6634 dst, op, err); 6635 break; 6636 case REASON_LIMIT: 6637 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", 6638 dst, op, err); 6639 break; 6640 case REASON_STACK: 6641 verbose(env, "R%d could not be pushed for speculative verification, %s\n", 6642 dst, err); 6643 break; 6644 default: 6645 verbose(env, "verifier internal error: unknown reason (%d)\n", 6646 reason); 6647 break; 6648 } 6649 6650 return -EACCES; 6651 } 6652 6653 /* check that stack access falls within stack limits and that 'reg' doesn't 6654 * have a variable offset. 6655 * 6656 * Variable offset is prohibited for unprivileged mode for simplicity since it 6657 * requires corresponding support in Spectre masking for stack ALU. See also 6658 * retrieve_ptr_limit(). 6659 * 6660 * 6661 * 'off' includes 'reg->off'. 6662 */ 6663 static int check_stack_access_for_ptr_arithmetic( 6664 struct bpf_verifier_env *env, 6665 int regno, 6666 const struct bpf_reg_state *reg, 6667 int off) 6668 { 6669 if (!tnum_is_const(reg->var_off)) { 6670 char tn_buf[48]; 6671 6672 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6673 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", 6674 regno, tn_buf, off); 6675 return -EACCES; 6676 } 6677 6678 if (off >= 0 || off < -MAX_BPF_STACK) { 6679 verbose(env, "R%d stack pointer arithmetic goes out of range, " 6680 "prohibited for !root; off=%d\n", regno, off); 6681 return -EACCES; 6682 } 6683 6684 return 0; 6685 } 6686 6687 static int sanitize_check_bounds(struct bpf_verifier_env *env, 6688 const struct bpf_insn *insn, 6689 const struct bpf_reg_state *dst_reg) 6690 { 6691 u32 dst = insn->dst_reg; 6692 6693 /* For unprivileged we require that resulting offset must be in bounds 6694 * in order to be able to sanitize access later on. 6695 */ 6696 if (env->bypass_spec_v1) 6697 return 0; 6698 6699 switch (dst_reg->type) { 6700 case PTR_TO_STACK: 6701 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, 6702 dst_reg->off + dst_reg->var_off.value)) 6703 return -EACCES; 6704 break; 6705 case PTR_TO_MAP_VALUE: 6706 if (check_map_access(env, dst, dst_reg->off, 1, false)) { 6707 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 6708 "prohibited for !root\n", dst); 6709 return -EACCES; 6710 } 6711 break; 6712 default: 6713 break; 6714 } 6715 6716 return 0; 6717 } 6718 6719 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 6720 * Caller should also handle BPF_MOV case separately. 6721 * If we return -EACCES, caller may want to try again treating pointer as a 6722 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
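 *
 * Example of the tracking done below (a sketch):
 *   r0 = bpf_map_lookup_elem(...)  // PTR_TO_MAP_VALUE after NULL check
 *   r1 = scalar known to lie in [0, 16]
 *   r0 += r1
 * dst_reg stays PTR_TO_MAP_VALUE and the [0, 16] range moves into its
 * umin/umax/var_off, so check_map_access() can later bound the access.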
6723 */
6724 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
6725 struct bpf_insn *insn,
6726 const struct bpf_reg_state *ptr_reg,
6727 const struct bpf_reg_state *off_reg)
6728 {
6729 struct bpf_verifier_state *vstate = env->cur_state;
6730 struct bpf_func_state *state = vstate->frame[vstate->curframe];
6731 struct bpf_reg_state *regs = state->regs, *dst_reg;
6732 bool known = tnum_is_const(off_reg->var_off);
6733 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
6734 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
6735 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
6736 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
6737 struct bpf_sanitize_info info = {};
6738 u8 opcode = BPF_OP(insn->code);
6739 u32 dst = insn->dst_reg;
6740 int ret;
6741
6742 dst_reg = &regs[dst];
6743
6744 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
6745 smin_val > smax_val || umin_val > umax_val) {
6746 /* Taint dst register if offset had invalid bounds derived from
6747 * e.g. dead branches.
6748 */
6749 __mark_reg_unknown(env, dst_reg);
6750 return 0;
6751 }
6752
6753 if (BPF_CLASS(insn->code) != BPF_ALU64) {
6754 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
6755 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
6756 __mark_reg_unknown(env, dst_reg);
6757 return 0;
6758 }
6759
6760 verbose(env,
6761 "R%d 32-bit pointer arithmetic prohibited\n",
6762 dst);
6763 return -EACCES;
6764 }
6765
6766 switch (ptr_reg->type) {
6767 case PTR_TO_MAP_VALUE_OR_NULL:
6768 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
6769 dst, reg_type_str[ptr_reg->type]);
6770 return -EACCES;
6771 case CONST_PTR_TO_MAP:
6772 /* smin_val represents the known value */
6773 if (known && smin_val == 0 && opcode == BPF_ADD)
6774 break;
6775 fallthrough;
6776 case PTR_TO_PACKET_END:
6777 case PTR_TO_SOCKET:
6778 case PTR_TO_SOCKET_OR_NULL:
6779 case PTR_TO_SOCK_COMMON:
6780 case PTR_TO_SOCK_COMMON_OR_NULL:
6781 case PTR_TO_TCP_SOCK:
6782 case PTR_TO_TCP_SOCK_OR_NULL:
6783 case PTR_TO_XDP_SOCK:
6784 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
6785 dst, reg_type_str[ptr_reg->type]);
6786 return -EACCES;
6787 default:
6788 break;
6789 }
6790
6791 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
6792 * The id may be overwritten later if we create a new variable offset.
6793 */
6794 dst_reg->type = ptr_reg->type;
6795 dst_reg->id = ptr_reg->id;
6796
6797 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
6798 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
6799 return -EINVAL;
6800
6801 /* pointer types do not carry 32-bit bounds at the moment. */
6802 __mark_reg32_unbounded(dst_reg);
6803
6804 if (sanitize_needed(opcode)) {
6805 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
6806 &info, false);
6807 if (ret < 0)
6808 return sanitize_err(env, insn, ret, off_reg, dst_reg);
6809 }
6810
6811 switch (opcode) {
6812 case BPF_ADD:
6813 /* We can take a fixed offset as long as it doesn't overflow
6814 * the s32 'off' field
6815 */
6816 if (known && (ptr_reg->off + smin_val ==
6817 (s64)(s32)(ptr_reg->off + smin_val))) {
6818 /* pointer += K.
Accumulate it into fixed offset */ 6819 dst_reg->smin_value = smin_ptr; 6820 dst_reg->smax_value = smax_ptr; 6821 dst_reg->umin_value = umin_ptr; 6822 dst_reg->umax_value = umax_ptr; 6823 dst_reg->var_off = ptr_reg->var_off; 6824 dst_reg->off = ptr_reg->off + smin_val; 6825 dst_reg->raw = ptr_reg->raw; 6826 break; 6827 } 6828 /* A new variable offset is created. Note that off_reg->off 6829 * == 0, since it's a scalar. 6830 * dst_reg gets the pointer type and since some positive 6831 * integer value was added to the pointer, give it a new 'id' 6832 * if it's a PTR_TO_PACKET. 6833 * this creates a new 'base' pointer, off_reg (variable) gets 6834 * added into the variable offset, and we copy the fixed offset 6835 * from ptr_reg. 6836 */ 6837 if (signed_add_overflows(smin_ptr, smin_val) || 6838 signed_add_overflows(smax_ptr, smax_val)) { 6839 dst_reg->smin_value = S64_MIN; 6840 dst_reg->smax_value = S64_MAX; 6841 } else { 6842 dst_reg->smin_value = smin_ptr + smin_val; 6843 dst_reg->smax_value = smax_ptr + smax_val; 6844 } 6845 if (umin_ptr + umin_val < umin_ptr || 6846 umax_ptr + umax_val < umax_ptr) { 6847 dst_reg->umin_value = 0; 6848 dst_reg->umax_value = U64_MAX; 6849 } else { 6850 dst_reg->umin_value = umin_ptr + umin_val; 6851 dst_reg->umax_value = umax_ptr + umax_val; 6852 } 6853 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 6854 dst_reg->off = ptr_reg->off; 6855 dst_reg->raw = ptr_reg->raw; 6856 if (reg_is_pkt_pointer(ptr_reg)) { 6857 dst_reg->id = ++env->id_gen; 6858 /* something was added to pkt_ptr, set range to zero */ 6859 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 6860 } 6861 break; 6862 case BPF_SUB: 6863 if (dst_reg == off_reg) { 6864 /* scalar -= pointer. Creates an unknown scalar */ 6865 verbose(env, "R%d tried to subtract pointer from scalar\n", 6866 dst); 6867 return -EACCES; 6868 } 6869 /* We don't allow subtraction from FP, because (according to 6870 * test_verifier.c test "invalid fp arithmetic", JITs might not 6871 * be able to deal with it. 6872 */ 6873 if (ptr_reg->type == PTR_TO_STACK) { 6874 verbose(env, "R%d subtraction from stack pointer prohibited\n", 6875 dst); 6876 return -EACCES; 6877 } 6878 if (known && (ptr_reg->off - smin_val == 6879 (s64)(s32)(ptr_reg->off - smin_val))) { 6880 /* pointer -= K. Subtract it from fixed offset */ 6881 dst_reg->smin_value = smin_ptr; 6882 dst_reg->smax_value = smax_ptr; 6883 dst_reg->umin_value = umin_ptr; 6884 dst_reg->umax_value = umax_ptr; 6885 dst_reg->var_off = ptr_reg->var_off; 6886 dst_reg->id = ptr_reg->id; 6887 dst_reg->off = ptr_reg->off - smin_val; 6888 dst_reg->raw = ptr_reg->raw; 6889 break; 6890 } 6891 /* A new variable offset is created. If the subtrahend is known 6892 * nonnegative, then any reg->range we had before is still good. 
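 * Worked example for the crosswise bounds below (a sketch): with the
 * pointer known to be exactly 100 and a scalar in [0, 16], the result
 * lies in [100 - 16, 100 - 0] = [84, 100], i.e. the new smin is
 * smin_ptr - smax_val and the new smax is smax_ptr - smin_val.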
6893 */ 6894 if (signed_sub_overflows(smin_ptr, smax_val) || 6895 signed_sub_overflows(smax_ptr, smin_val)) { 6896 /* Overflow possible, we know nothing */ 6897 dst_reg->smin_value = S64_MIN; 6898 dst_reg->smax_value = S64_MAX; 6899 } else { 6900 dst_reg->smin_value = smin_ptr - smax_val; 6901 dst_reg->smax_value = smax_ptr - smin_val; 6902 } 6903 if (umin_ptr < umax_val) { 6904 /* Overflow possible, we know nothing */ 6905 dst_reg->umin_value = 0; 6906 dst_reg->umax_value = U64_MAX; 6907 } else { 6908 /* Cannot overflow (as long as bounds are consistent) */ 6909 dst_reg->umin_value = umin_ptr - umax_val; 6910 dst_reg->umax_value = umax_ptr - umin_val; 6911 } 6912 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 6913 dst_reg->off = ptr_reg->off; 6914 dst_reg->raw = ptr_reg->raw; 6915 if (reg_is_pkt_pointer(ptr_reg)) { 6916 dst_reg->id = ++env->id_gen; 6917 /* something was added to pkt_ptr, set range to zero */ 6918 if (smin_val < 0) 6919 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 6920 } 6921 break; 6922 case BPF_AND: 6923 case BPF_OR: 6924 case BPF_XOR: 6925 /* bitwise ops on pointers are troublesome, prohibit. */ 6926 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 6927 dst, bpf_alu_string[opcode >> 4]); 6928 return -EACCES; 6929 default: 6930 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 6931 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 6932 dst, bpf_alu_string[opcode >> 4]); 6933 return -EACCES; 6934 } 6935 6936 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 6937 return -EINVAL; 6938 6939 __update_reg_bounds(dst_reg); 6940 __reg_deduce_bounds(dst_reg); 6941 __reg_bound_offset(dst_reg); 6942 6943 if (sanitize_check_bounds(env, insn, dst_reg) < 0) 6944 return -EACCES; 6945 if (sanitize_needed(opcode)) { 6946 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, 6947 &info, true); 6948 if (ret < 0) 6949 return sanitize_err(env, insn, ret, off_reg, dst_reg); 6950 } 6951 6952 return 0; 6953 } 6954 6955 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 6956 struct bpf_reg_state *src_reg) 6957 { 6958 s32 smin_val = src_reg->s32_min_value; 6959 s32 smax_val = src_reg->s32_max_value; 6960 u32 umin_val = src_reg->u32_min_value; 6961 u32 umax_val = src_reg->u32_max_value; 6962 6963 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 6964 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 6965 dst_reg->s32_min_value = S32_MIN; 6966 dst_reg->s32_max_value = S32_MAX; 6967 } else { 6968 dst_reg->s32_min_value += smin_val; 6969 dst_reg->s32_max_value += smax_val; 6970 } 6971 if (dst_reg->u32_min_value + umin_val < umin_val || 6972 dst_reg->u32_max_value + umax_val < umax_val) { 6973 dst_reg->u32_min_value = 0; 6974 dst_reg->u32_max_value = U32_MAX; 6975 } else { 6976 dst_reg->u32_min_value += umin_val; 6977 dst_reg->u32_max_value += umax_val; 6978 } 6979 } 6980 6981 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 6982 struct bpf_reg_state *src_reg) 6983 { 6984 s64 smin_val = src_reg->smin_value; 6985 s64 smax_val = src_reg->smax_value; 6986 u64 umin_val = src_reg->umin_value; 6987 u64 umax_val = src_reg->umax_value; 6988 6989 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 6990 signed_add_overflows(dst_reg->smax_value, smax_val)) { 6991 dst_reg->smin_value = S64_MIN; 6992 dst_reg->smax_value = S64_MAX; 6993 } else { 6994 dst_reg->smin_value += smin_val; 6995 dst_reg->smax_value += smax_val; 6996 } 6997 if (dst_reg->umin_value + umin_val < umin_val 
|| 6998 dst_reg->umax_value + umax_val < umax_val) { 6999 dst_reg->umin_value = 0; 7000 dst_reg->umax_value = U64_MAX; 7001 } else { 7002 dst_reg->umin_value += umin_val; 7003 dst_reg->umax_value += umax_val; 7004 } 7005 } 7006 7007 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 7008 struct bpf_reg_state *src_reg) 7009 { 7010 s32 smin_val = src_reg->s32_min_value; 7011 s32 smax_val = src_reg->s32_max_value; 7012 u32 umin_val = src_reg->u32_min_value; 7013 u32 umax_val = src_reg->u32_max_value; 7014 7015 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 7016 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 7017 /* Overflow possible, we know nothing */ 7018 dst_reg->s32_min_value = S32_MIN; 7019 dst_reg->s32_max_value = S32_MAX; 7020 } else { 7021 dst_reg->s32_min_value -= smax_val; 7022 dst_reg->s32_max_value -= smin_val; 7023 } 7024 if (dst_reg->u32_min_value < umax_val) { 7025 /* Overflow possible, we know nothing */ 7026 dst_reg->u32_min_value = 0; 7027 dst_reg->u32_max_value = U32_MAX; 7028 } else { 7029 /* Cannot overflow (as long as bounds are consistent) */ 7030 dst_reg->u32_min_value -= umax_val; 7031 dst_reg->u32_max_value -= umin_val; 7032 } 7033 } 7034 7035 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 7036 struct bpf_reg_state *src_reg) 7037 { 7038 s64 smin_val = src_reg->smin_value; 7039 s64 smax_val = src_reg->smax_value; 7040 u64 umin_val = src_reg->umin_value; 7041 u64 umax_val = src_reg->umax_value; 7042 7043 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 7044 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 7045 /* Overflow possible, we know nothing */ 7046 dst_reg->smin_value = S64_MIN; 7047 dst_reg->smax_value = S64_MAX; 7048 } else { 7049 dst_reg->smin_value -= smax_val; 7050 dst_reg->smax_value -= smin_val; 7051 } 7052 if (dst_reg->umin_value < umax_val) { 7053 /* Overflow possible, we know nothing */ 7054 dst_reg->umin_value = 0; 7055 dst_reg->umax_value = U64_MAX; 7056 } else { 7057 /* Cannot overflow (as long as bounds are consistent) */ 7058 dst_reg->umin_value -= umax_val; 7059 dst_reg->umax_value -= umin_val; 7060 } 7061 } 7062 7063 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 7064 struct bpf_reg_state *src_reg) 7065 { 7066 s32 smin_val = src_reg->s32_min_value; 7067 u32 umin_val = src_reg->u32_min_value; 7068 u32 umax_val = src_reg->u32_max_value; 7069 7070 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 7071 /* Ain't nobody got time to multiply that sign */ 7072 __mark_reg32_unbounded(dst_reg); 7073 return; 7074 } 7075 /* Both values are positive, so we can work with unsigned and 7076 * copy the result to signed (unless it exceeds S32_MAX). 
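 * The U16_MAX guard below is what makes the u32 multiply safe:
 * (2^16 - 1) * (2^16 - 1) = 2^32 - 2^17 + 1 < 2^32, so a product of
 * two values <= U16_MAX cannot wrap u32.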
7077 */ 7078 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 7079 /* Potential overflow, we know nothing */ 7080 __mark_reg32_unbounded(dst_reg); 7081 return; 7082 } 7083 dst_reg->u32_min_value *= umin_val; 7084 dst_reg->u32_max_value *= umax_val; 7085 if (dst_reg->u32_max_value > S32_MAX) { 7086 /* Overflow possible, we know nothing */ 7087 dst_reg->s32_min_value = S32_MIN; 7088 dst_reg->s32_max_value = S32_MAX; 7089 } else { 7090 dst_reg->s32_min_value = dst_reg->u32_min_value; 7091 dst_reg->s32_max_value = dst_reg->u32_max_value; 7092 } 7093 } 7094 7095 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 7096 struct bpf_reg_state *src_reg) 7097 { 7098 s64 smin_val = src_reg->smin_value; 7099 u64 umin_val = src_reg->umin_value; 7100 u64 umax_val = src_reg->umax_value; 7101 7102 if (smin_val < 0 || dst_reg->smin_value < 0) { 7103 /* Ain't nobody got time to multiply that sign */ 7104 __mark_reg64_unbounded(dst_reg); 7105 return; 7106 } 7107 /* Both values are positive, so we can work with unsigned and 7108 * copy the result to signed (unless it exceeds S64_MAX). 7109 */ 7110 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 7111 /* Potential overflow, we know nothing */ 7112 __mark_reg64_unbounded(dst_reg); 7113 return; 7114 } 7115 dst_reg->umin_value *= umin_val; 7116 dst_reg->umax_value *= umax_val; 7117 if (dst_reg->umax_value > S64_MAX) { 7118 /* Overflow possible, we know nothing */ 7119 dst_reg->smin_value = S64_MIN; 7120 dst_reg->smax_value = S64_MAX; 7121 } else { 7122 dst_reg->smin_value = dst_reg->umin_value; 7123 dst_reg->smax_value = dst_reg->umax_value; 7124 } 7125 } 7126 7127 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 7128 struct bpf_reg_state *src_reg) 7129 { 7130 bool src_known = tnum_subreg_is_const(src_reg->var_off); 7131 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 7132 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 7133 s32 smin_val = src_reg->s32_min_value; 7134 u32 umax_val = src_reg->u32_max_value; 7135 7136 if (src_known && dst_known) { 7137 __mark_reg32_known(dst_reg, var32_off.value); 7138 return; 7139 } 7140 7141 /* We get our minimum from the var_off, since that's inherently 7142 * bitwise. Our maximum is the minimum of the operands' maxima. 7143 */ 7144 dst_reg->u32_min_value = var32_off.value; 7145 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 7146 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 7147 /* Lose signed bounds when ANDing negative numbers, 7148 * ain't nobody got time for that. 7149 */ 7150 dst_reg->s32_min_value = S32_MIN; 7151 dst_reg->s32_max_value = S32_MAX; 7152 } else { 7153 /* ANDing two positives gives a positive, so safe to 7154 * cast result into s64. 7155 */ 7156 dst_reg->s32_min_value = dst_reg->u32_min_value; 7157 dst_reg->s32_max_value = dst_reg->u32_max_value; 7158 } 7159 } 7160 7161 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 7162 struct bpf_reg_state *src_reg) 7163 { 7164 bool src_known = tnum_is_const(src_reg->var_off); 7165 bool dst_known = tnum_is_const(dst_reg->var_off); 7166 s64 smin_val = src_reg->smin_value; 7167 u64 umax_val = src_reg->umax_value; 7168 7169 if (src_known && dst_known) { 7170 __mark_reg_known(dst_reg, dst_reg->var_off.value); 7171 return; 7172 } 7173 7174 /* We get our minimum from the var_off, since that's inherently 7175 * bitwise. Our maximum is the minimum of the operands' maxima. 
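 * E.g. (a sketch): if dst's var_off is (value 0x0, mask 0xff), i.e. an
 * unknown low byte, and src's umax_val is 0x0f, then below umin
 * becomes 0x0 and umax becomes min(old umax, 0x0f); AND can only
 * clear bits, never set them.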
7176 */ 7177 dst_reg->umin_value = dst_reg->var_off.value; 7178 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 7179 if (dst_reg->smin_value < 0 || smin_val < 0) { 7180 /* Lose signed bounds when ANDing negative numbers, 7181 * ain't nobody got time for that. 7182 */ 7183 dst_reg->smin_value = S64_MIN; 7184 dst_reg->smax_value = S64_MAX; 7185 } else { 7186 /* ANDing two positives gives a positive, so safe to 7187 * cast result into s64. 7188 */ 7189 dst_reg->smin_value = dst_reg->umin_value; 7190 dst_reg->smax_value = dst_reg->umax_value; 7191 } 7192 /* We may learn something more from the var_off */ 7193 __update_reg_bounds(dst_reg); 7194 } 7195 7196 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 7197 struct bpf_reg_state *src_reg) 7198 { 7199 bool src_known = tnum_subreg_is_const(src_reg->var_off); 7200 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 7201 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 7202 s32 smin_val = src_reg->s32_min_value; 7203 u32 umin_val = src_reg->u32_min_value; 7204 7205 if (src_known && dst_known) { 7206 __mark_reg32_known(dst_reg, var32_off.value); 7207 return; 7208 } 7209 7210 /* We get our maximum from the var_off, and our minimum is the 7211 * maximum of the operands' minima 7212 */ 7213 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 7214 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 7215 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 7216 /* Lose signed bounds when ORing negative numbers, 7217 * ain't nobody got time for that. 7218 */ 7219 dst_reg->s32_min_value = S32_MIN; 7220 dst_reg->s32_max_value = S32_MAX; 7221 } else { 7222 /* ORing two positives gives a positive, so safe to 7223 * cast result into s64. 7224 */ 7225 dst_reg->s32_min_value = dst_reg->u32_min_value; 7226 dst_reg->s32_max_value = dst_reg->u32_max_value; 7227 } 7228 } 7229 7230 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 7231 struct bpf_reg_state *src_reg) 7232 { 7233 bool src_known = tnum_is_const(src_reg->var_off); 7234 bool dst_known = tnum_is_const(dst_reg->var_off); 7235 s64 smin_val = src_reg->smin_value; 7236 u64 umin_val = src_reg->umin_value; 7237 7238 if (src_known && dst_known) { 7239 __mark_reg_known(dst_reg, dst_reg->var_off.value); 7240 return; 7241 } 7242 7243 /* We get our maximum from the var_off, and our minimum is the 7244 * maximum of the operands' minima 7245 */ 7246 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 7247 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 7248 if (dst_reg->smin_value < 0 || smin_val < 0) { 7249 /* Lose signed bounds when ORing negative numbers, 7250 * ain't nobody got time for that. 7251 */ 7252 dst_reg->smin_value = S64_MIN; 7253 dst_reg->smax_value = S64_MAX; 7254 } else { 7255 /* ORing two positives gives a positive, so safe to 7256 * cast result into s64. 
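 * (For the unsigned bounds above: OR can only set bits, so the result
 * is at least the larger of the two minima, and var_off.value |
 * var_off.mask bounds it from above.)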
7257 */ 7258 dst_reg->smin_value = dst_reg->umin_value; 7259 dst_reg->smax_value = dst_reg->umax_value; 7260 } 7261 /* We may learn something more from the var_off */ 7262 __update_reg_bounds(dst_reg); 7263 } 7264 7265 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 7266 struct bpf_reg_state *src_reg) 7267 { 7268 bool src_known = tnum_subreg_is_const(src_reg->var_off); 7269 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 7270 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 7271 s32 smin_val = src_reg->s32_min_value; 7272 7273 if (src_known && dst_known) { 7274 __mark_reg32_known(dst_reg, var32_off.value); 7275 return; 7276 } 7277 7278 /* We get both minimum and maximum from the var32_off. */ 7279 dst_reg->u32_min_value = var32_off.value; 7280 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 7281 7282 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 7283 /* XORing two positive sign numbers gives a positive, 7284 * so safe to cast u32 result into s32. 7285 */ 7286 dst_reg->s32_min_value = dst_reg->u32_min_value; 7287 dst_reg->s32_max_value = dst_reg->u32_max_value; 7288 } else { 7289 dst_reg->s32_min_value = S32_MIN; 7290 dst_reg->s32_max_value = S32_MAX; 7291 } 7292 } 7293 7294 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 7295 struct bpf_reg_state *src_reg) 7296 { 7297 bool src_known = tnum_is_const(src_reg->var_off); 7298 bool dst_known = tnum_is_const(dst_reg->var_off); 7299 s64 smin_val = src_reg->smin_value; 7300 7301 if (src_known && dst_known) { 7302 /* dst_reg->var_off.value has been updated earlier */ 7303 __mark_reg_known(dst_reg, dst_reg->var_off.value); 7304 return; 7305 } 7306 7307 /* We get both minimum and maximum from the var_off. */ 7308 dst_reg->umin_value = dst_reg->var_off.value; 7309 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 7310 7311 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 7312 /* XORing two positive sign numbers gives a positive, 7313 * so safe to cast u64 result into s64. 7314 */ 7315 dst_reg->smin_value = dst_reg->umin_value; 7316 dst_reg->smax_value = dst_reg->umax_value; 7317 } else { 7318 dst_reg->smin_value = S64_MIN; 7319 dst_reg->smax_value = S64_MAX; 7320 } 7321 7322 __update_reg_bounds(dst_reg); 7323 } 7324 7325 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 7326 u64 umin_val, u64 umax_val) 7327 { 7328 /* We lose all sign bit information (except what we can pick 7329 * up from var_off) 7330 */ 7331 dst_reg->s32_min_value = S32_MIN; 7332 dst_reg->s32_max_value = S32_MAX; 7333 /* If we might shift our top bit out, then we know nothing */ 7334 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 7335 dst_reg->u32_min_value = 0; 7336 dst_reg->u32_max_value = U32_MAX; 7337 } else { 7338 dst_reg->u32_min_value <<= umin_val; 7339 dst_reg->u32_max_value <<= umax_val; 7340 } 7341 } 7342 7343 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 7344 struct bpf_reg_state *src_reg) 7345 { 7346 u32 umax_val = src_reg->u32_max_value; 7347 u32 umin_val = src_reg->u32_min_value; 7348 /* u32 alu operation will zext upper bits */ 7349 struct tnum subreg = tnum_subreg(dst_reg->var_off); 7350 7351 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 7352 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 7353 /* Not required but being careful mark reg64 bounds as unknown so 7354 * that we are forced to pick them up from tnum and zext later and 7355 * if some path skips this step we are still safe. 
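 * (Recovery is possible because a 32-bit ALU result is zero-extended:
 * the upper 32 bits are known zero, so the 64-bit bounds can be
 * rebuilt from the subreg tnum alone.)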
7356 */ 7357 __mark_reg64_unbounded(dst_reg); 7358 __update_reg32_bounds(dst_reg); 7359 } 7360 7361 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 7362 u64 umin_val, u64 umax_val) 7363 { 7364 /* Special case <<32 because it is a common compiler pattern to sign 7365 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 7366 * positive we know this shift will also be positive so we can track 7367 * bounds correctly. Otherwise we lose all sign bit information except 7368 * what we can pick up from var_off. Perhaps we can generalize this 7369 * later to shifts of any length. 7370 */ 7371 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 7372 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 7373 else 7374 dst_reg->smax_value = S64_MAX; 7375 7376 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 7377 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 7378 else 7379 dst_reg->smin_value = S64_MIN; 7380 7381 /* If we might shift our top bit out, then we know nothing */ 7382 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 7383 dst_reg->umin_value = 0; 7384 dst_reg->umax_value = U64_MAX; 7385 } else { 7386 dst_reg->umin_value <<= umin_val; 7387 dst_reg->umax_value <<= umax_val; 7388 } 7389 } 7390 7391 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 7392 struct bpf_reg_state *src_reg) 7393 { 7394 u64 umax_val = src_reg->umax_value; 7395 u64 umin_val = src_reg->umin_value; 7396 7397 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 7398 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 7399 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 7400 7401 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 7402 /* We may learn something more from the var_off */ 7403 __update_reg_bounds(dst_reg); 7404 } 7405 7406 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 7407 struct bpf_reg_state *src_reg) 7408 { 7409 struct tnum subreg = tnum_subreg(dst_reg->var_off); 7410 u32 umax_val = src_reg->u32_max_value; 7411 u32 umin_val = src_reg->u32_min_value; 7412 7413 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 7414 * be negative, then either: 7415 * 1) src_reg might be zero, so the sign bit of the result is 7416 * unknown, so we lose our signed bounds 7417 * 2) it's known negative, thus the unsigned bounds capture the 7418 * signed bounds 7419 * 3) the signed bounds cross zero, so they tell us nothing 7420 * about the result 7421 * If the value in dst_reg is known nonnegative, then again the 7422 * unsigned bounds capture the signed bounds. 7423 * Thus, in all cases it suffices to blow away our signed bounds 7424 * and rely on inferring new ones from the unsigned bounds and 7425 * var_off of the result. 7426 */ 7427 dst_reg->s32_min_value = S32_MIN; 7428 dst_reg->s32_max_value = S32_MAX; 7429 7430 dst_reg->var_off = tnum_rshift(subreg, umin_val); 7431 dst_reg->u32_min_value >>= umax_val; 7432 dst_reg->u32_max_value >>= umin_val; 7433 7434 __mark_reg64_unbounded(dst_reg); 7435 __update_reg32_bounds(dst_reg); 7436 } 7437 7438 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 7439 struct bpf_reg_state *src_reg) 7440 { 7441 u64 umax_val = src_reg->umax_value; 7442 u64 umin_val = src_reg->umin_value; 7443 7444 /* BPF_RSH is an unsigned shift. 
If the value in dst_reg might 7445 * be negative, then either: 7446 * 1) src_reg might be zero, so the sign bit of the result is 7447 * unknown, so we lose our signed bounds 7448 * 2) it's known negative, thus the unsigned bounds capture the 7449 * signed bounds 7450 * 3) the signed bounds cross zero, so they tell us nothing 7451 * about the result 7452 * If the value in dst_reg is known nonnegative, then again the 7453 * unsigned bounds capture the signed bounds. 7454 * Thus, in all cases it suffices to blow away our signed bounds 7455 * and rely on inferring new ones from the unsigned bounds and 7456 * var_off of the result. 7457 */ 7458 dst_reg->smin_value = S64_MIN; 7459 dst_reg->smax_value = S64_MAX; 7460 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 7461 dst_reg->umin_value >>= umax_val; 7462 dst_reg->umax_value >>= umin_val; 7463 7464 /* It's not easy to operate on alu32 bounds here because it depends 7465 * on bits being shifted in. Take the easy way out and mark unbounded 7466 * so we can recalculate later from tnum. 7467 */ 7468 __mark_reg32_unbounded(dst_reg); 7469 __update_reg_bounds(dst_reg); 7470 } 7471 7472 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 7473 struct bpf_reg_state *src_reg) 7474 { 7475 u64 umin_val = src_reg->u32_min_value; 7476 7477 /* Upon reaching here, src_known is true and 7478 * umax_val is equal to umin_val. 7479 */ 7480 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 7481 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 7482 7483 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 7484 7485 /* blow away the dst_reg umin_value/umax_value and rely on 7486 * dst_reg var_off to refine the result. 7487 */ 7488 dst_reg->u32_min_value = 0; 7489 dst_reg->u32_max_value = U32_MAX; 7490 7491 __mark_reg64_unbounded(dst_reg); 7492 __update_reg32_bounds(dst_reg); 7493 } 7494 7495 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 7496 struct bpf_reg_state *src_reg) 7497 { 7498 u64 umin_val = src_reg->umin_value; 7499 7500 /* Upon reaching here, src_known is true and umax_val is equal 7501 * to umin_val. 7502 */ 7503 dst_reg->smin_value >>= umin_val; 7504 dst_reg->smax_value >>= umin_val; 7505 7506 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 7507 7508 /* blow away the dst_reg umin_value/umax_value and rely on 7509 * dst_reg var_off to refine the result. 7510 */ 7511 dst_reg->umin_value = 0; 7512 dst_reg->umax_value = U64_MAX; 7513 7514 /* It's not easy to operate on alu32 bounds here because it depends 7515 * on bits being shifted in from the upper 32 bits. Take the easy way 7516 * out and mark unbounded so we can recalculate later from tnum. 7517 */ 7518 __mark_reg32_unbounded(dst_reg); 7519 __update_reg_bounds(dst_reg); 7520 } 7521 7522 /* WARNING: This function does calculations on 64-bit values, but the actual 7523 * execution may occur on 32-bit values. Therefore, things like bitshifts 7524 * need extra checks in the 32-bit case. 7525 */ 7526 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 7527 struct bpf_insn *insn, 7528 struct bpf_reg_state *dst_reg, 7529 struct bpf_reg_state src_reg) 7530 { 7531 struct bpf_reg_state *regs = cur_regs(env); 7532 u8 opcode = BPF_OP(insn->code); 7533 bool src_known; 7534 s64 smin_val, smax_val; 7535 u64 umin_val, umax_val; 7536 s32 s32_min_val, s32_max_val; 7537 u32 u32_min_val, u32_max_val; 7538 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ?
64 : 32; 7539 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 7540 int ret; 7541 7542 smin_val = src_reg.smin_value; 7543 smax_val = src_reg.smax_value; 7544 umin_val = src_reg.umin_value; 7545 umax_val = src_reg.umax_value; 7546 7547 s32_min_val = src_reg.s32_min_value; 7548 s32_max_val = src_reg.s32_max_value; 7549 u32_min_val = src_reg.u32_min_value; 7550 u32_max_val = src_reg.u32_max_value; 7551 7552 if (alu32) { 7553 src_known = tnum_subreg_is_const(src_reg.var_off); 7554 if ((src_known && 7555 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 7556 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 7557 /* Taint dst register if offset had invalid bounds 7558 * derived from e.g. dead branches. 7559 */ 7560 __mark_reg_unknown(env, dst_reg); 7561 return 0; 7562 } 7563 } else { 7564 src_known = tnum_is_const(src_reg.var_off); 7565 if ((src_known && 7566 (smin_val != smax_val || umin_val != umax_val)) || 7567 smin_val > smax_val || umin_val > umax_val) { 7568 /* Taint dst register if offset had invalid bounds 7569 * derived from e.g. dead branches. 7570 */ 7571 __mark_reg_unknown(env, dst_reg); 7572 return 0; 7573 } 7574 } 7575 7576 if (!src_known && 7577 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 7578 __mark_reg_unknown(env, dst_reg); 7579 return 0; 7580 } 7581 7582 if (sanitize_needed(opcode)) { 7583 ret = sanitize_val_alu(env, insn); 7584 if (ret < 0) 7585 return sanitize_err(env, insn, ret, NULL, NULL); 7586 } 7587 7588 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 7589 * There are two classes of instructions: For the first class we track 7590 * both alu32 and alu64 sign/unsigned bounds independently; this provides 7591 * the greatest amount of precision when alu operations are mixed with 7592 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, 7593 * BPF_AND, BPF_OR, and BPF_XOR. This is possible because these ops have 7594 * fairly easy to understand and calculate behavior in both 32-bit and 7595 * 64-bit alu ops. See the alu32 verifier tests for examples. The second 7596 * class of operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however, are not 7597 * so easy with regards to tracking sign/unsigned bounds because the bits 7598 * may cross subreg boundaries in the alu64 case. When this happens we 7599 * mark the reg unbounded in the subreg bound space and use the resulting 7600 * tnum to calculate an approximation of the sign/unsigned bounds.
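* E.g. for a 64-bit r1 >>= 4, bits from the upper half shift down into the lower subreg, so scalar_min_max_rsh() simply marks the 32-bit bounds unbounded and recovers what it can from the shifted var_off afterwards.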
7601 */ 7602 switch (opcode) { 7603 case BPF_ADD: 7604 scalar32_min_max_add(dst_reg, &src_reg); 7605 scalar_min_max_add(dst_reg, &src_reg); 7606 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 7607 break; 7608 case BPF_SUB: 7609 scalar32_min_max_sub(dst_reg, &src_reg); 7610 scalar_min_max_sub(dst_reg, &src_reg); 7611 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 7612 break; 7613 case BPF_MUL: 7614 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 7615 scalar32_min_max_mul(dst_reg, &src_reg); 7616 scalar_min_max_mul(dst_reg, &src_reg); 7617 break; 7618 case BPF_AND: 7619 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 7620 scalar32_min_max_and(dst_reg, &src_reg); 7621 scalar_min_max_and(dst_reg, &src_reg); 7622 break; 7623 case BPF_OR: 7624 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 7625 scalar32_min_max_or(dst_reg, &src_reg); 7626 scalar_min_max_or(dst_reg, &src_reg); 7627 break; 7628 case BPF_XOR: 7629 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 7630 scalar32_min_max_xor(dst_reg, &src_reg); 7631 scalar_min_max_xor(dst_reg, &src_reg); 7632 break; 7633 case BPF_LSH: 7634 if (umax_val >= insn_bitness) { 7635 /* Shifts greater than 31 or 63 are undefined. 7636 * This includes shifts by a negative number. 7637 */ 7638 mark_reg_unknown(env, regs, insn->dst_reg); 7639 break; 7640 } 7641 if (alu32) 7642 scalar32_min_max_lsh(dst_reg, &src_reg); 7643 else 7644 scalar_min_max_lsh(dst_reg, &src_reg); 7645 break; 7646 case BPF_RSH: 7647 if (umax_val >= insn_bitness) { 7648 /* Shifts greater than 31 or 63 are undefined. 7649 * This includes shifts by a negative number. 7650 */ 7651 mark_reg_unknown(env, regs, insn->dst_reg); 7652 break; 7653 } 7654 if (alu32) 7655 scalar32_min_max_rsh(dst_reg, &src_reg); 7656 else 7657 scalar_min_max_rsh(dst_reg, &src_reg); 7658 break; 7659 case BPF_ARSH: 7660 if (umax_val >= insn_bitness) { 7661 /* Shifts greater than 31 or 63 are undefined. 7662 * This includes shifts by a negative number. 7663 */ 7664 mark_reg_unknown(env, regs, insn->dst_reg); 7665 break; 7666 } 7667 if (alu32) 7668 scalar32_min_max_arsh(dst_reg, &src_reg); 7669 else 7670 scalar_min_max_arsh(dst_reg, &src_reg); 7671 break; 7672 default: 7673 mark_reg_unknown(env, regs, insn->dst_reg); 7674 break; 7675 } 7676 7677 /* ALU32 ops are zero extended into 64bit register */ 7678 if (alu32) 7679 zext_32_to_64(dst_reg); 7680 7681 __update_reg_bounds(dst_reg); 7682 __reg_deduce_bounds(dst_reg); 7683 __reg_bound_offset(dst_reg); 7684 return 0; 7685 } 7686 7687 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 7688 * and var_off. 
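* E.g. r1 += r2 with r1 in [0, 15] and r2 in [1, 2] leaves r1 with smin/umin 1, smax/umax 17, and a var_off re-derived via tnum_add().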
7689 */ 7690 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 7691 struct bpf_insn *insn) 7692 { 7693 struct bpf_verifier_state *vstate = env->cur_state; 7694 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 7695 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 7696 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 7697 u8 opcode = BPF_OP(insn->code); 7698 int err; 7699 7700 dst_reg = &regs[insn->dst_reg]; 7701 src_reg = NULL; 7702 if (dst_reg->type != SCALAR_VALUE) 7703 ptr_reg = dst_reg; 7704 else 7705 /* Make sure ID is cleared otherwise dst_reg min/max could be 7706 * incorrectly propagated into other registers by find_equal_scalars() 7707 */ 7708 dst_reg->id = 0; 7709 if (BPF_SRC(insn->code) == BPF_X) { 7710 src_reg = &regs[insn->src_reg]; 7711 if (src_reg->type != SCALAR_VALUE) { 7712 if (dst_reg->type != SCALAR_VALUE) { 7713 /* Combining two pointers by any ALU op yields 7714 * an arbitrary scalar. Disallow all math except 7715 * pointer subtraction 7716 */ 7717 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 7718 mark_reg_unknown(env, regs, insn->dst_reg); 7719 return 0; 7720 } 7721 verbose(env, "R%d pointer %s pointer prohibited\n", 7722 insn->dst_reg, 7723 bpf_alu_string[opcode >> 4]); 7724 return -EACCES; 7725 } else { 7726 /* scalar += pointer 7727 * This is legal, but we have to reverse our 7728 * src/dest handling in computing the range 7729 */ 7730 err = mark_chain_precision(env, insn->dst_reg); 7731 if (err) 7732 return err; 7733 return adjust_ptr_min_max_vals(env, insn, 7734 src_reg, dst_reg); 7735 } 7736 } else if (ptr_reg) { 7737 /* pointer += scalar */ 7738 err = mark_chain_precision(env, insn->src_reg); 7739 if (err) 7740 return err; 7741 return adjust_ptr_min_max_vals(env, insn, 7742 dst_reg, src_reg); 7743 } 7744 } else { 7745 /* Pretend the src is a reg with a known value, since we only 7746 * need to be able to read from this state.
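* E.g. for BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7) the immediate 7 is materialized in off_reg as a known SCALAR_VALUE and flows through the same paths a register source would.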
7747 */ 7748 off_reg.type = SCALAR_VALUE; 7749 __mark_reg_known(&off_reg, insn->imm); 7750 src_reg = &off_reg; 7751 if (ptr_reg) /* pointer += K */ 7752 return adjust_ptr_min_max_vals(env, insn, 7753 ptr_reg, src_reg); 7754 } 7755 7756 /* Got here implies adding two SCALAR_VALUEs */ 7757 if (WARN_ON_ONCE(ptr_reg)) { 7758 print_verifier_state(env, state); 7759 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 7760 return -EINVAL; 7761 } 7762 if (WARN_ON(!src_reg)) { 7763 print_verifier_state(env, state); 7764 verbose(env, "verifier internal error: no src_reg\n"); 7765 return -EINVAL; 7766 } 7767 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 7768 } 7769 7770 /* check validity of 32-bit and 64-bit arithmetic operations */ 7771 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 7772 { 7773 struct bpf_reg_state *regs = cur_regs(env); 7774 u8 opcode = BPF_OP(insn->code); 7775 int err; 7776 7777 if (opcode == BPF_END || opcode == BPF_NEG) { 7778 if (opcode == BPF_NEG) { 7779 if (BPF_SRC(insn->code) != 0 || 7780 insn->src_reg != BPF_REG_0 || 7781 insn->off != 0 || insn->imm != 0) { 7782 verbose(env, "BPF_NEG uses reserved fields\n"); 7783 return -EINVAL; 7784 } 7785 } else { 7786 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 7787 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 7788 BPF_CLASS(insn->code) == BPF_ALU64) { 7789 verbose(env, "BPF_END uses reserved fields\n"); 7790 return -EINVAL; 7791 } 7792 } 7793 7794 /* check src operand */ 7795 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7796 if (err) 7797 return err; 7798 7799 if (is_pointer_value(env, insn->dst_reg)) { 7800 verbose(env, "R%d pointer arithmetic prohibited\n", 7801 insn->dst_reg); 7802 return -EACCES; 7803 } 7804 7805 /* check dest operand */ 7806 err = check_reg_arg(env, insn->dst_reg, DST_OP); 7807 if (err) 7808 return err; 7809 7810 } else if (opcode == BPF_MOV) { 7811 7812 if (BPF_SRC(insn->code) == BPF_X) { 7813 if (insn->imm != 0 || insn->off != 0) { 7814 verbose(env, "BPF_MOV uses reserved fields\n"); 7815 return -EINVAL; 7816 } 7817 7818 /* check src operand */ 7819 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7820 if (err) 7821 return err; 7822 } else { 7823 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 7824 verbose(env, "BPF_MOV uses reserved fields\n"); 7825 return -EINVAL; 7826 } 7827 } 7828 7829 /* check dest operand, mark as required later */ 7830 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 7831 if (err) 7832 return err; 7833 7834 if (BPF_SRC(insn->code) == BPF_X) { 7835 struct bpf_reg_state *src_reg = regs + insn->src_reg; 7836 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 7837 7838 if (BPF_CLASS(insn->code) == BPF_ALU64) { 7839 /* case: R1 = R2 7840 * copy register state to dest reg 7841 */ 7842 if (src_reg->type == SCALAR_VALUE && !src_reg->id) 7843 /* Assign src and dst registers the same ID 7844 * that will be used by find_equal_scalars() 7845 * to propagate min/max range. 
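* E.g. after r6 = r1, a later 'if r6 > 10 goto ...' refines the bounds of r1 as well, because both registers carry the same ID.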
7846 */ 7847 src_reg->id = ++env->id_gen; 7848 *dst_reg = *src_reg; 7849 dst_reg->live |= REG_LIVE_WRITTEN; 7850 dst_reg->subreg_def = DEF_NOT_SUBREG; 7851 } else { 7852 /* R1 = (u32) R2 */ 7853 if (is_pointer_value(env, insn->src_reg)) { 7854 verbose(env, 7855 "R%d partial copy of pointer\n", 7856 insn->src_reg); 7857 return -EACCES; 7858 } else if (src_reg->type == SCALAR_VALUE) { 7859 *dst_reg = *src_reg; 7860 /* Make sure ID is cleared otherwise 7861 * dst_reg min/max could be incorrectly 7862 * propagated into src_reg by find_equal_scalars() 7863 */ 7864 dst_reg->id = 0; 7865 dst_reg->live |= REG_LIVE_WRITTEN; 7866 dst_reg->subreg_def = env->insn_idx + 1; 7867 } else { 7868 mark_reg_unknown(env, regs, 7869 insn->dst_reg); 7870 } 7871 zext_32_to_64(dst_reg); 7872 } 7873 } else { 7874 /* case: R = imm 7875 * remember the value we stored into this reg 7876 */ 7877 /* clear any state __mark_reg_known doesn't set */ 7878 mark_reg_unknown(env, regs, insn->dst_reg); 7879 regs[insn->dst_reg].type = SCALAR_VALUE; 7880 if (BPF_CLASS(insn->code) == BPF_ALU64) { 7881 __mark_reg_known(regs + insn->dst_reg, 7882 insn->imm); 7883 } else { 7884 __mark_reg_known(regs + insn->dst_reg, 7885 (u32)insn->imm); 7886 } 7887 } 7888 7889 } else if (opcode > BPF_END) { 7890 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 7891 return -EINVAL; 7892 7893 } else { /* all other ALU ops: and, sub, xor, add, ... */ 7894 7895 if (BPF_SRC(insn->code) == BPF_X) { 7896 if (insn->imm != 0 || insn->off != 0) { 7897 verbose(env, "BPF_ALU uses reserved fields\n"); 7898 return -EINVAL; 7899 } 7900 /* check src1 operand */ 7901 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7902 if (err) 7903 return err; 7904 } else { 7905 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 7906 verbose(env, "BPF_ALU uses reserved fields\n"); 7907 return -EINVAL; 7908 } 7909 } 7910 7911 /* check src2 operand */ 7912 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7913 if (err) 7914 return err; 7915 7916 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 7917 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 7918 verbose(env, "div by zero\n"); 7919 return -EINVAL; 7920 } 7921 7922 if ((opcode == BPF_LSH || opcode == BPF_RSH || 7923 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 7924 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; 7925 7926 if (insn->imm < 0 || insn->imm >= size) { 7927 verbose(env, "invalid shift %d\n", insn->imm); 7928 return -EINVAL; 7929 } 7930 } 7931 7932 /* check dest operand */ 7933 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 7934 if (err) 7935 return err; 7936 7937 return adjust_reg_min_max_vals(env, insn); 7938 } 7939 7940 return 0; 7941 } 7942 7943 static void __find_good_pkt_pointers(struct bpf_func_state *state, 7944 struct bpf_reg_state *dst_reg, 7945 enum bpf_reg_type type, int new_range) 7946 { 7947 struct bpf_reg_state *reg; 7948 int i; 7949 7950 for (i = 0; i < MAX_BPF_REG; i++) { 7951 reg = &state->regs[i]; 7952 if (reg->type == type && reg->id == dst_reg->id) 7953 /* keep the maximum range already checked */ 7954 reg->range = max(reg->range, new_range); 7955 } 7956 7957 bpf_for_each_spilled_reg(i, state, reg) { 7958 if (!reg) 7959 continue; 7960 if (reg->type == type && reg->id == dst_reg->id) 7961 reg->range = max(reg->range, new_range); 7962 } 7963 } 7964 7965 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 7966 struct bpf_reg_state *dst_reg, 7967 enum bpf_reg_type type, 7968 bool range_right_open) 7969 { 7970 int new_range, i; 7971 7972 if (dst_reg->off < 0 || 7973 (dst_reg->off == 0 && range_right_open)) 7974 /* This doesn't give us any range */ 7975 return; 7976 7977 if (dst_reg->umax_value > MAX_PACKET_OFF || 7978 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 7979 /* Risk of overflow. For instance, ptr + (1<<63) may be less 7980 * than pkt_end, but that's because it's also less than pkt. 7981 */ 7982 return; 7983 7984 new_range = dst_reg->off; 7985 if (range_right_open) 7986 new_range--; 7987 7988 /* Examples for register markings: 7989 * 7990 * pkt_data in dst register: 7991 * 7992 * r2 = r3; 7993 * r2 += 8; 7994 * if (r2 > pkt_end) goto <handle exception> 7995 * <access okay> 7996 * 7997 * r2 = r3; 7998 * r2 += 8; 7999 * if (r2 < pkt_end) goto <access okay> 8000 * <handle exception> 8001 * 8002 * Where: 8003 * r2 == dst_reg, pkt_end == src_reg 8004 * r2=pkt(id=n,off=8,r=0) 8005 * r3=pkt(id=n,off=0,r=0) 8006 * 8007 * pkt_data in src register: 8008 * 8009 * r2 = r3; 8010 * r2 += 8; 8011 * if (pkt_end >= r2) goto <access okay> 8012 * <handle exception> 8013 * 8014 * r2 = r3; 8015 * r2 += 8; 8016 * if (pkt_end <= r2) goto <handle exception> 8017 * <access okay> 8018 * 8019 * Where: 8020 * pkt_end == dst_reg, r2 == src_reg 8021 * r2=pkt(id=n,off=8,r=0) 8022 * r3=pkt(id=n,off=0,r=0) 8023 * 8024 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 8025 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 8026 * and [r3, r3 + 8-1) respectively is safe to access depending on 8027 * the check. 8028 */ 8029 8030 /* If our ids match, then we must have the same max_value. And we 8031 * don't care about the other reg's fixed offset, since if it's too big 8032 * the range won't allow anything. 8033 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
8034 */ 8035 for (i = 0; i <= vstate->curframe; i++) 8036 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, 8037 new_range); 8038 } 8039 8040 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 8041 { 8042 struct tnum subreg = tnum_subreg(reg->var_off); 8043 s32 sval = (s32)val; 8044 8045 switch (opcode) { 8046 case BPF_JEQ: 8047 if (tnum_is_const(subreg)) 8048 return !!tnum_equals_const(subreg, val); 8049 break; 8050 case BPF_JNE: 8051 if (tnum_is_const(subreg)) 8052 return !tnum_equals_const(subreg, val); 8053 break; 8054 case BPF_JSET: 8055 if ((~subreg.mask & subreg.value) & val) 8056 return 1; 8057 if (!((subreg.mask | subreg.value) & val)) 8058 return 0; 8059 break; 8060 case BPF_JGT: 8061 if (reg->u32_min_value > val) 8062 return 1; 8063 else if (reg->u32_max_value <= val) 8064 return 0; 8065 break; 8066 case BPF_JSGT: 8067 if (reg->s32_min_value > sval) 8068 return 1; 8069 else if (reg->s32_max_value <= sval) 8070 return 0; 8071 break; 8072 case BPF_JLT: 8073 if (reg->u32_max_value < val) 8074 return 1; 8075 else if (reg->u32_min_value >= val) 8076 return 0; 8077 break; 8078 case BPF_JSLT: 8079 if (reg->s32_max_value < sval) 8080 return 1; 8081 else if (reg->s32_min_value >= sval) 8082 return 0; 8083 break; 8084 case BPF_JGE: 8085 if (reg->u32_min_value >= val) 8086 return 1; 8087 else if (reg->u32_max_value < val) 8088 return 0; 8089 break; 8090 case BPF_JSGE: 8091 if (reg->s32_min_value >= sval) 8092 return 1; 8093 else if (reg->s32_max_value < sval) 8094 return 0; 8095 break; 8096 case BPF_JLE: 8097 if (reg->u32_max_value <= val) 8098 return 1; 8099 else if (reg->u32_min_value > val) 8100 return 0; 8101 break; 8102 case BPF_JSLE: 8103 if (reg->s32_max_value <= sval) 8104 return 1; 8105 else if (reg->s32_min_value > sval) 8106 return 0; 8107 break; 8108 } 8109 8110 return -1; 8111 } 8112 8113 8114 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 8115 { 8116 s64 sval = (s64)val; 8117 8118 switch (opcode) { 8119 case BPF_JEQ: 8120 if (tnum_is_const(reg->var_off)) 8121 return !!tnum_equals_const(reg->var_off, val); 8122 break; 8123 case BPF_JNE: 8124 if (tnum_is_const(reg->var_off)) 8125 return !tnum_equals_const(reg->var_off, val); 8126 break; 8127 case BPF_JSET: 8128 if ((~reg->var_off.mask & reg->var_off.value) & val) 8129 return 1; 8130 if (!((reg->var_off.mask | reg->var_off.value) & val)) 8131 return 0; 8132 break; 8133 case BPF_JGT: 8134 if (reg->umin_value > val) 8135 return 1; 8136 else if (reg->umax_value <= val) 8137 return 0; 8138 break; 8139 case BPF_JSGT: 8140 if (reg->smin_value > sval) 8141 return 1; 8142 else if (reg->smax_value <= sval) 8143 return 0; 8144 break; 8145 case BPF_JLT: 8146 if (reg->umax_value < val) 8147 return 1; 8148 else if (reg->umin_value >= val) 8149 return 0; 8150 break; 8151 case BPF_JSLT: 8152 if (reg->smax_value < sval) 8153 return 1; 8154 else if (reg->smin_value >= sval) 8155 return 0; 8156 break; 8157 case BPF_JGE: 8158 if (reg->umin_value >= val) 8159 return 1; 8160 else if (reg->umax_value < val) 8161 return 0; 8162 break; 8163 case BPF_JSGE: 8164 if (reg->smin_value >= sval) 8165 return 1; 8166 else if (reg->smax_value < sval) 8167 return 0; 8168 break; 8169 case BPF_JLE: 8170 if (reg->umax_value <= val) 8171 return 1; 8172 else if (reg->umin_value > val) 8173 return 0; 8174 break; 8175 case BPF_JSLE: 8176 if (reg->smax_value <= sval) 8177 return 1; 8178 else if (reg->smin_value > sval) 8179 return 0; 8180 break; 8181 } 8182 8183 return -1; 8184 } 8185 8186 /* compute branch 
direction of the expression "if (reg opcode val) goto target;" 8187 * and return: 8188 * 1 - branch will be taken and "goto target" will be executed 8189 * 0 - branch will not be taken and fall-through to next insn 8190 * -1 - unknown. Example: "if (reg < 5)" is unknown when the register 8191 * value is in the range [0,10] 8192 */ 8193 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 8194 bool is_jmp32) 8195 { 8196 if (__is_pointer_value(false, reg)) { 8197 if (!reg_type_not_null(reg->type)) 8198 return -1; 8199 8200 /* If the pointer is valid, tests against zero will fail, so we 8201 * can use this to decide the branch direction. 8202 */ 8203 if (val != 0) 8204 return -1; 8205 8206 switch (opcode) { 8207 case BPF_JEQ: 8208 return 0; 8209 case BPF_JNE: 8210 return 1; 8211 default: 8212 return -1; 8213 } 8214 } 8215 8216 if (is_jmp32) 8217 return is_branch32_taken(reg, val, opcode); 8218 return is_branch64_taken(reg, val, opcode); 8219 } 8220 8221 static int flip_opcode(u32 opcode) 8222 { 8223 /* How can we transform "a <op> b" into "b <op> a"? */ 8224 static const u8 opcode_flip[16] = { 8225 /* these stay the same */ 8226 [BPF_JEQ >> 4] = BPF_JEQ, 8227 [BPF_JNE >> 4] = BPF_JNE, 8228 [BPF_JSET >> 4] = BPF_JSET, 8229 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 8230 [BPF_JGE >> 4] = BPF_JLE, 8231 [BPF_JGT >> 4] = BPF_JLT, 8232 [BPF_JLE >> 4] = BPF_JGE, 8233 [BPF_JLT >> 4] = BPF_JGT, 8234 [BPF_JSGE >> 4] = BPF_JSLE, 8235 [BPF_JSGT >> 4] = BPF_JSLT, 8236 [BPF_JSLE >> 4] = BPF_JSGE, 8237 [BPF_JSLT >> 4] = BPF_JSGT 8238 }; 8239 return opcode_flip[opcode >> 4]; 8240 } 8241 8242 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, 8243 struct bpf_reg_state *src_reg, 8244 u8 opcode) 8245 { 8246 struct bpf_reg_state *pkt; 8247 8248 if (src_reg->type == PTR_TO_PACKET_END) { 8249 pkt = dst_reg; 8250 } else if (dst_reg->type == PTR_TO_PACKET_END) { 8251 pkt = src_reg; 8252 opcode = flip_opcode(opcode); 8253 } else { 8254 return -1; 8255 } 8256 8257 if (pkt->range >= 0) 8258 return -1; 8259 8260 switch (opcode) { 8261 case BPF_JLE: 8262 /* pkt <= pkt_end */ 8263 fallthrough; 8264 case BPF_JGT: 8265 /* pkt > pkt_end */ 8266 if (pkt->range == BEYOND_PKT_END) 8267 /* pkt has at least one extra byte beyond pkt_end */ 8268 return opcode == BPF_JGT; 8269 break; 8270 case BPF_JLT: 8271 /* pkt < pkt_end */ 8272 fallthrough; 8273 case BPF_JGE: 8274 /* pkt >= pkt_end */ 8275 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) 8276 return opcode == BPF_JGE; 8277 break; 8278 } 8279 return -1; 8280 } 8281 8282 /* Adjusts the register min/max values in the case that the dst_reg is the 8283 * variable register that we are working on, and src_reg is a constant or we're 8284 * simply doing a BPF_K check. 8285 * In JEQ/JNE cases we also adjust the var_off values. 8286 */ 8287 static void reg_set_min_max(struct bpf_reg_state *true_reg, 8288 struct bpf_reg_state *false_reg, 8289 u64 val, u32 val32, 8290 u8 opcode, bool is_jmp32) 8291 { 8292 struct tnum false_32off = tnum_subreg(false_reg->var_off); 8293 struct tnum false_64off = false_reg->var_off; 8294 struct tnum true_32off = tnum_subreg(true_reg->var_off); 8295 struct tnum true_64off = true_reg->var_off; 8296 s64 sval = (s64)val; 8297 s32 sval32 = (s32)val32; 8298 8299 /* If the dst_reg is a pointer, we can't learn anything about its 8300 * variable offset from the compare (unless src_reg were a pointer into 8301 * the same object, but we don't bother with that).
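* For scalars we do learn something: e.g. 'if r1 > 5 goto ...' with r1 in [0, 10] leaves the true branch with umin_value 6 and the false branch with umax_value 5.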
8302 * Since false_reg and true_reg have the same type by construction, we 8303 * only need to check one of them for pointerness. 8304 */ 8305 if (__is_pointer_value(false, false_reg)) 8306 return; 8307 8308 switch (opcode) { 8309 case BPF_JEQ: 8310 case BPF_JNE: 8311 { 8312 struct bpf_reg_state *reg = 8313 opcode == BPF_JEQ ? true_reg : false_reg; 8314 8315 /* JEQ/JNE comparison doesn't change the register equivalence. 8316 * r1 = r2; 8317 * if (r1 == 42) goto label; 8318 * ... 8319 * label: // here both r1 and r2 are known to be 42. 8320 * 8321 * Hence when marking a register as known, preserve its ID. 8322 */ 8323 if (is_jmp32) 8324 __mark_reg32_known(reg, val32); 8325 else 8326 ___mark_reg_known(reg, val); 8327 break; 8328 } 8329 case BPF_JSET: 8330 if (is_jmp32) { 8331 false_32off = tnum_and(false_32off, tnum_const(~val32)); 8332 if (is_power_of_2(val32)) 8333 true_32off = tnum_or(true_32off, 8334 tnum_const(val32)); 8335 } else { 8336 false_64off = tnum_and(false_64off, tnum_const(~val)); 8337 if (is_power_of_2(val)) 8338 true_64off = tnum_or(true_64off, 8339 tnum_const(val)); 8340 } 8341 break; 8342 case BPF_JGE: 8343 case BPF_JGT: 8344 { 8345 if (is_jmp32) { 8346 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 8347 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 8348 8349 false_reg->u32_max_value = min(false_reg->u32_max_value, 8350 false_umax); 8351 true_reg->u32_min_value = max(true_reg->u32_min_value, 8352 true_umin); 8353 } else { 8354 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 8355 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 8356 8357 false_reg->umax_value = min(false_reg->umax_value, false_umax); 8358 true_reg->umin_value = max(true_reg->umin_value, true_umin); 8359 } 8360 break; 8361 } 8362 case BPF_JSGE: 8363 case BPF_JSGT: 8364 { 8365 if (is_jmp32) { 8366 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 8367 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 8368 8369 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 8370 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 8371 } else { 8372 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 8373 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 8374 8375 false_reg->smax_value = min(false_reg->smax_value, false_smax); 8376 true_reg->smin_value = max(true_reg->smin_value, true_smin); 8377 } 8378 break; 8379 } 8380 case BPF_JLE: 8381 case BPF_JLT: 8382 { 8383 if (is_jmp32) { 8384 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 8385 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 8386 8387 false_reg->u32_min_value = max(false_reg->u32_min_value, 8388 false_umin); 8389 true_reg->u32_max_value = min(true_reg->u32_max_value, 8390 true_umax); 8391 } else { 8392 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 8393 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 8394 8395 false_reg->umin_value = max(false_reg->umin_value, false_umin); 8396 true_reg->umax_value = min(true_reg->umax_value, true_umax); 8397 } 8398 break; 8399 } 8400 case BPF_JSLE: 8401 case BPF_JSLT: 8402 { 8403 if (is_jmp32) { 8404 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 8405 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 8406 8407 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 8408 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 8409 } else { 8410 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 8411 s64 true_smax = opcode == BPF_JSLT ?
sval - 1 : sval; 8412 8413 false_reg->smin_value = max(false_reg->smin_value, false_smin); 8414 true_reg->smax_value = min(true_reg->smax_value, true_smax); 8415 } 8416 break; 8417 } 8418 default: 8419 return; 8420 } 8421 8422 if (is_jmp32) { 8423 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 8424 tnum_subreg(false_32off)); 8425 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 8426 tnum_subreg(true_32off)); 8427 __reg_combine_32_into_64(false_reg); 8428 __reg_combine_32_into_64(true_reg); 8429 } else { 8430 false_reg->var_off = false_64off; 8431 true_reg->var_off = true_64off; 8432 __reg_combine_64_into_32(false_reg); 8433 __reg_combine_64_into_32(true_reg); 8434 } 8435 } 8436 8437 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 8438 * the variable reg. 8439 */ 8440 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 8441 struct bpf_reg_state *false_reg, 8442 u64 val, u32 val32, 8443 u8 opcode, bool is_jmp32) 8444 { 8445 opcode = flip_opcode(opcode); 8446 /* This uses zero as "not present in table"; luckily the zero opcode, 8447 * BPF_JA, can't get here. 8448 */ 8449 if (opcode) 8450 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 8451 } 8452 8453 /* Regs are known to be equal, so intersect their min/max/var_off */ 8454 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 8455 struct bpf_reg_state *dst_reg) 8456 { 8457 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 8458 dst_reg->umin_value); 8459 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 8460 dst_reg->umax_value); 8461 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 8462 dst_reg->smin_value); 8463 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 8464 dst_reg->smax_value); 8465 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 8466 dst_reg->var_off); 8467 /* We might have learned new bounds from the var_off. */ 8468 __update_reg_bounds(src_reg); 8469 __update_reg_bounds(dst_reg); 8470 /* We might have learned something about the sign bit. */ 8471 __reg_deduce_bounds(src_reg); 8472 __reg_deduce_bounds(dst_reg); 8473 /* We might have learned some bits from the bounds. */ 8474 __reg_bound_offset(src_reg); 8475 __reg_bound_offset(dst_reg); 8476 /* Intersecting with the old var_off might have improved our bounds 8477 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 8478 * then new var_off is (0; 0x7f...fc) which improves our umax. 8479 */ 8480 __update_reg_bounds(src_reg); 8481 __update_reg_bounds(dst_reg); 8482 } 8483 8484 static void reg_combine_min_max(struct bpf_reg_state *true_src, 8485 struct bpf_reg_state *true_dst, 8486 struct bpf_reg_state *false_src, 8487 struct bpf_reg_state *false_dst, 8488 u8 opcode) 8489 { 8490 switch (opcode) { 8491 case BPF_JEQ: 8492 __reg_combine_min_max(true_src, true_dst); 8493 break; 8494 case BPF_JNE: 8495 __reg_combine_min_max(false_src, false_dst); 8496 break; 8497 } 8498 } 8499 8500 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 8501 struct bpf_reg_state *reg, u32 id, 8502 bool is_null) 8503 { 8504 if (reg_type_may_be_null(reg->type) && reg->id == id && 8505 !WARN_ON_ONCE(!reg->id)) { 8506 /* Old offset (both fixed and variable parts) should 8507 * have been known-zero, because we don't allow pointer 8508 * arithmetic on pointers that might be NULL. 
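* E.g. a PTR_TO_MAP_VALUE_OR_NULL returned by a map lookup still has off == 0 here, since any 'r0 += 4' before the NULL check would already have been rejected.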
8509 */ 8510 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 8511 !tnum_equals_const(reg->var_off, 0) || 8512 reg->off)) { 8513 __mark_reg_known_zero(reg); 8514 reg->off = 0; 8515 } 8516 if (is_null) { 8517 reg->type = SCALAR_VALUE; 8518 /* We don't need id and ref_obj_id from this point 8519 * onwards anymore, thus we should better reset it, 8520 * so that state pruning has chances to take effect. 8521 */ 8522 reg->id = 0; 8523 reg->ref_obj_id = 0; 8524 8525 return; 8526 } 8527 8528 mark_ptr_not_null_reg(reg); 8529 8530 if (!reg_may_point_to_spin_lock(reg)) { 8531 /* For not-NULL ptr, reg->ref_obj_id will be reset 8532 * in release_reg_references(). 8533 * 8534 * reg->id is still used by spin_lock ptr. Other 8535 * than spin_lock ptr type, reg->id can be reset. 8536 */ 8537 reg->id = 0; 8538 } 8539 } 8540 } 8541 8542 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 8543 bool is_null) 8544 { 8545 struct bpf_reg_state *reg; 8546 int i; 8547 8548 for (i = 0; i < MAX_BPF_REG; i++) 8549 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 8550 8551 bpf_for_each_spilled_reg(i, state, reg) { 8552 if (!reg) 8553 continue; 8554 mark_ptr_or_null_reg(state, reg, id, is_null); 8555 } 8556 } 8557 8558 /* The logic is similar to find_good_pkt_pointers(), both could eventually 8559 * be folded together at some point. 8560 */ 8561 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 8562 bool is_null) 8563 { 8564 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 8565 struct bpf_reg_state *regs = state->regs; 8566 u32 ref_obj_id = regs[regno].ref_obj_id; 8567 u32 id = regs[regno].id; 8568 int i; 8569 8570 if (ref_obj_id && ref_obj_id == id && is_null) 8571 /* regs[regno] is in the " == NULL" branch. 8572 * No one could have freed the reference state before 8573 * doing the NULL check. 8574 */ 8575 WARN_ON_ONCE(release_reference_state(state, id)); 8576 8577 for (i = 0; i <= vstate->curframe; i++) 8578 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 8579 } 8580 8581 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 8582 struct bpf_reg_state *dst_reg, 8583 struct bpf_reg_state *src_reg, 8584 struct bpf_verifier_state *this_branch, 8585 struct bpf_verifier_state *other_branch) 8586 { 8587 if (BPF_SRC(insn->code) != BPF_X) 8588 return false; 8589 8590 /* Pointers are always 64-bit. 
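* A JMP32 comparison would look only at the low 32 bits of a pointer, which is never a meaningful packet bounds check, hence the bail-out below.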
*/ 8591 if (BPF_CLASS(insn->code) == BPF_JMP32) 8592 return false; 8593 8594 switch (BPF_OP(insn->code)) { 8595 case BPF_JGT: 8596 if ((dst_reg->type == PTR_TO_PACKET && 8597 src_reg->type == PTR_TO_PACKET_END) || 8598 (dst_reg->type == PTR_TO_PACKET_META && 8599 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 8600 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 8601 find_good_pkt_pointers(this_branch, dst_reg, 8602 dst_reg->type, false); 8603 mark_pkt_end(other_branch, insn->dst_reg, true); 8604 } else if ((dst_reg->type == PTR_TO_PACKET_END && 8605 src_reg->type == PTR_TO_PACKET) || 8606 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 8607 src_reg->type == PTR_TO_PACKET_META)) { 8608 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 8609 find_good_pkt_pointers(other_branch, src_reg, 8610 src_reg->type, true); 8611 mark_pkt_end(this_branch, insn->src_reg, false); 8612 } else { 8613 return false; 8614 } 8615 break; 8616 case BPF_JLT: 8617 if ((dst_reg->type == PTR_TO_PACKET && 8618 src_reg->type == PTR_TO_PACKET_END) || 8619 (dst_reg->type == PTR_TO_PACKET_META && 8620 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 8621 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 8622 find_good_pkt_pointers(other_branch, dst_reg, 8623 dst_reg->type, true); 8624 mark_pkt_end(this_branch, insn->dst_reg, false); 8625 } else if ((dst_reg->type == PTR_TO_PACKET_END && 8626 src_reg->type == PTR_TO_PACKET) || 8627 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 8628 src_reg->type == PTR_TO_PACKET_META)) { 8629 /* pkt_end < pkt_data', pkt_data < pkt_meta' */ 8630 find_good_pkt_pointers(this_branch, src_reg, 8631 src_reg->type, false); 8632 mark_pkt_end(other_branch, insn->src_reg, true); 8633 } else { 8634 return false; 8635 } 8636 break; 8637 case BPF_JGE: 8638 if ((dst_reg->type == PTR_TO_PACKET && 8639 src_reg->type == PTR_TO_PACKET_END) || 8640 (dst_reg->type == PTR_TO_PACKET_META && 8641 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 8642 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 8643 find_good_pkt_pointers(this_branch, dst_reg, 8644 dst_reg->type, true); 8645 mark_pkt_end(other_branch, insn->dst_reg, false); 8646 } else if ((dst_reg->type == PTR_TO_PACKET_END && 8647 src_reg->type == PTR_TO_PACKET) || 8648 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 8649 src_reg->type == PTR_TO_PACKET_META)) { 8650 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 8651 find_good_pkt_pointers(other_branch, src_reg, 8652 src_reg->type, false); 8653 mark_pkt_end(this_branch, insn->src_reg, true); 8654 } else { 8655 return false; 8656 } 8657 break; 8658 case BPF_JLE: 8659 if ((dst_reg->type == PTR_TO_PACKET && 8660 src_reg->type == PTR_TO_PACKET_END) || 8661 (dst_reg->type == PTR_TO_PACKET_META && 8662 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 8663 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 8664 find_good_pkt_pointers(other_branch, dst_reg, 8665 dst_reg->type, false); 8666 mark_pkt_end(this_branch, insn->dst_reg, true); 8667 } else if ((dst_reg->type == PTR_TO_PACKET_END && 8668 src_reg->type == PTR_TO_PACKET) || 8669 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 8670 src_reg->type == PTR_TO_PACKET_META)) { 8671 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 8672 find_good_pkt_pointers(this_branch, src_reg, 8673 src_reg->type, true); 8674 mark_pkt_end(other_branch, insn->src_reg, false); 8675 } else { 8676 return false; 8677 } 8678 break; 8679 default: 8680 return false; 8681 } 8682 8683 return true; 8684 } 8685 8686 static void find_equal_scalars(struct
bpf_verifier_state *vstate, 8687 struct bpf_reg_state *known_reg) 8688 { 8689 struct bpf_func_state *state; 8690 struct bpf_reg_state *reg; 8691 int i, j; 8692 8693 for (i = 0; i <= vstate->curframe; i++) { 8694 state = vstate->frame[i]; 8695 for (j = 0; j < MAX_BPF_REG; j++) { 8696 reg = &state->regs[j]; 8697 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 8698 *reg = *known_reg; 8699 } 8700 8701 bpf_for_each_spilled_reg(j, state, reg) { 8702 if (!reg) 8703 continue; 8704 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 8705 *reg = *known_reg; 8706 } 8707 } 8708 } 8709 8710 static int check_cond_jmp_op(struct bpf_verifier_env *env, 8711 struct bpf_insn *insn, int *insn_idx) 8712 { 8713 struct bpf_verifier_state *this_branch = env->cur_state; 8714 struct bpf_verifier_state *other_branch; 8715 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 8716 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 8717 u8 opcode = BPF_OP(insn->code); 8718 bool is_jmp32; 8719 int pred = -1; 8720 int err; 8721 8722 /* Only conditional jumps are expected to reach here. */ 8723 if (opcode == BPF_JA || opcode > BPF_JSLE) { 8724 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 8725 return -EINVAL; 8726 } 8727 8728 if (BPF_SRC(insn->code) == BPF_X) { 8729 if (insn->imm != 0) { 8730 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 8731 return -EINVAL; 8732 } 8733 8734 /* check src1 operand */ 8735 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8736 if (err) 8737 return err; 8738 8739 if (is_pointer_value(env, insn->src_reg)) { 8740 verbose(env, "R%d pointer comparison prohibited\n", 8741 insn->src_reg); 8742 return -EACCES; 8743 } 8744 src_reg = &regs[insn->src_reg]; 8745 } else { 8746 if (insn->src_reg != BPF_REG_0) { 8747 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 8748 return -EINVAL; 8749 } 8750 } 8751 8752 /* check src2 operand */ 8753 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8754 if (err) 8755 return err; 8756 8757 dst_reg = &regs[insn->dst_reg]; 8758 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 8759 8760 if (BPF_SRC(insn->code) == BPF_K) { 8761 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); 8762 } else if (src_reg->type == SCALAR_VALUE && 8763 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { 8764 pred = is_branch_taken(dst_reg, 8765 tnum_subreg(src_reg->var_off).value, 8766 opcode, 8767 is_jmp32); 8768 } else if (src_reg->type == SCALAR_VALUE && 8769 !is_jmp32 && tnum_is_const(src_reg->var_off)) { 8770 pred = is_branch_taken(dst_reg, 8771 src_reg->var_off.value, 8772 opcode, 8773 is_jmp32); 8774 } else if (reg_is_pkt_pointer_any(dst_reg) && 8775 reg_is_pkt_pointer_any(src_reg) && 8776 !is_jmp32) { 8777 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode); 8778 } 8779 8780 if (pred >= 0) { 8781 /* If we get here with a dst_reg pointer type it is because 8782 * above is_branch_taken() special cased the 0 comparison. 8783 */ 8784 if (!__is_pointer_value(false, dst_reg)) 8785 err = mark_chain_precision(env, insn->dst_reg); 8786 if (BPF_SRC(insn->code) == BPF_X && !err && 8787 !__is_pointer_value(false, src_reg)) 8788 err = mark_chain_precision(env, insn->src_reg); 8789 if (err) 8790 return err; 8791 } 8792 8793 if (pred == 1) { 8794 /* Only follow the goto, ignore fall-through. If needed, push 8795 * the fall-through branch for simulation under speculative 8796 * execution.
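* (The CPU may still enter the not-taken path speculatively; the sanitize_speculative_path() call below verifies that path as well unless Spectre v1 mitigations are bypassed.)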
8797 */ 8798 if (!env->bypass_spec_v1 && 8799 !sanitize_speculative_path(env, insn, *insn_idx + 1, 8800 *insn_idx)) 8801 return -EFAULT; 8802 *insn_idx += insn->off; 8803 return 0; 8804 } else if (pred == 0) { 8805 /* Only follow the fall-through branch, since that's where the 8806 * program will go. If needed, push the goto branch for 8807 * simulation under speculative execution. 8808 */ 8809 if (!env->bypass_spec_v1 && 8810 !sanitize_speculative_path(env, insn, 8811 *insn_idx + insn->off + 1, 8812 *insn_idx)) 8813 return -EFAULT; 8814 return 0; 8815 } 8816 8817 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 8818 false); 8819 if (!other_branch) 8820 return -EFAULT; 8821 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 8822 8823 /* detect if we are comparing against a constant value so we can adjust 8824 * our min/max values for our dst register. 8825 * this is only legit if both are scalars (or pointers to the same 8826 * object, I suppose, but we don't support that right now), because 8827 * otherwise the different base pointers mean the offsets aren't 8828 * comparable. 8829 */ 8830 if (BPF_SRC(insn->code) == BPF_X) { 8831 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; 8832 8833 if (dst_reg->type == SCALAR_VALUE && 8834 src_reg->type == SCALAR_VALUE) { 8835 if (tnum_is_const(src_reg->var_off) || 8836 (is_jmp32 && 8837 tnum_is_const(tnum_subreg(src_reg->var_off)))) 8838 reg_set_min_max(&other_branch_regs[insn->dst_reg], 8839 dst_reg, 8840 src_reg->var_off.value, 8841 tnum_subreg(src_reg->var_off).value, 8842 opcode, is_jmp32); 8843 else if (tnum_is_const(dst_reg->var_off) || 8844 (is_jmp32 && 8845 tnum_is_const(tnum_subreg(dst_reg->var_off)))) 8846 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 8847 src_reg, 8848 dst_reg->var_off.value, 8849 tnum_subreg(dst_reg->var_off).value, 8850 opcode, is_jmp32); 8851 else if (!is_jmp32 && 8852 (opcode == BPF_JEQ || opcode == BPF_JNE)) 8853 /* Comparing for equality, we can combine knowledge */ 8854 reg_combine_min_max(&other_branch_regs[insn->src_reg], 8855 &other_branch_regs[insn->dst_reg], 8856 src_reg, dst_reg, opcode); 8857 if (src_reg->id && 8858 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { 8859 find_equal_scalars(this_branch, src_reg); 8860 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); 8861 } 8862 8863 } 8864 } else if (dst_reg->type == SCALAR_VALUE) { 8865 reg_set_min_max(&other_branch_regs[insn->dst_reg], 8866 dst_reg, insn->imm, (u32)insn->imm, 8867 opcode, is_jmp32); 8868 } 8869 8870 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && 8871 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { 8872 find_equal_scalars(this_branch, dst_reg); 8873 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); 8874 } 8875 8876 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 8877 * NOTE: the optimizations below are related to pointer comparison, 8878 * which will never be JMP32. 8879 */ 8880 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 8881 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 8882 reg_type_may_be_null(dst_reg->type)) { 8883 /* Mark all identical registers in each branch as either 8884 * safe or unknown depending on the R == 0 or R != 0 condition.
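* E.g. after 'r1 = r0', both registers share one id, so a single 'if (r0 != 0)' check upgrades r1 in the taken branch as well.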
8885 */ 8886 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 8887 opcode == BPF_JNE); 8888 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 8889 opcode == BPF_JEQ); 8890 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 8891 this_branch, other_branch) && 8892 is_pointer_value(env, insn->dst_reg)) { 8893 verbose(env, "R%d pointer comparison prohibited\n", 8894 insn->dst_reg); 8895 return -EACCES; 8896 } 8897 if (env->log.level & BPF_LOG_LEVEL) 8898 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 8899 return 0; 8900 } 8901 8902 /* verify BPF_LD_IMM64 instruction */ 8903 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 8904 { 8905 struct bpf_insn_aux_data *aux = cur_aux(env); 8906 struct bpf_reg_state *regs = cur_regs(env); 8907 struct bpf_reg_state *dst_reg; 8908 struct bpf_map *map; 8909 int err; 8910 8911 if (BPF_SIZE(insn->code) != BPF_DW) { 8912 verbose(env, "invalid BPF_LD_IMM insn\n"); 8913 return -EINVAL; 8914 } 8915 if (insn->off != 0) { 8916 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 8917 return -EINVAL; 8918 } 8919 8920 err = check_reg_arg(env, insn->dst_reg, DST_OP); 8921 if (err) 8922 return err; 8923 8924 dst_reg = &regs[insn->dst_reg]; 8925 if (insn->src_reg == 0) { 8926 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 8927 8928 dst_reg->type = SCALAR_VALUE; 8929 __mark_reg_known(&regs[insn->dst_reg], imm); 8930 return 0; 8931 } 8932 8933 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 8934 mark_reg_known_zero(env, regs, insn->dst_reg); 8935 8936 dst_reg->type = aux->btf_var.reg_type; 8937 switch (dst_reg->type) { 8938 case PTR_TO_MEM: 8939 dst_reg->mem_size = aux->btf_var.mem_size; 8940 break; 8941 case PTR_TO_BTF_ID: 8942 case PTR_TO_PERCPU_BTF_ID: 8943 dst_reg->btf = aux->btf_var.btf; 8944 dst_reg->btf_id = aux->btf_var.btf_id; 8945 break; 8946 default: 8947 verbose(env, "bpf verifier is misconfigured\n"); 8948 return -EFAULT; 8949 } 8950 return 0; 8951 } 8952 8953 if (insn->src_reg == BPF_PSEUDO_FUNC) { 8954 struct bpf_prog_aux *aux = env->prog->aux; 8955 u32 subprogno = insn[1].imm; 8956 8957 if (!aux->func_info) { 8958 verbose(env, "missing btf func_info\n"); 8959 return -EINVAL; 8960 } 8961 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { 8962 verbose(env, "callback function not static\n"); 8963 return -EINVAL; 8964 } 8965 8966 dst_reg->type = PTR_TO_FUNC; 8967 dst_reg->subprogno = subprogno; 8968 return 0; 8969 } 8970 8971 map = env->used_maps[aux->map_index]; 8972 mark_reg_known_zero(env, regs, insn->dst_reg); 8973 dst_reg->map_ptr = map; 8974 8975 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || 8976 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { 8977 dst_reg->type = PTR_TO_MAP_VALUE; 8978 dst_reg->off = aux->map_off; 8979 if (map_value_has_spin_lock(map)) 8980 dst_reg->id = ++env->id_gen; 8981 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || 8982 insn->src_reg == BPF_PSEUDO_MAP_IDX) { 8983 dst_reg->type = CONST_PTR_TO_MAP; 8984 } else { 8985 verbose(env, "bpf verifier is misconfigured\n"); 8986 return -EINVAL; 8987 } 8988 8989 return 0; 8990 } 8991 8992 static bool may_access_skb(enum bpf_prog_type type) 8993 { 8994 switch (type) { 8995 case BPF_PROG_TYPE_SOCKET_FILTER: 8996 case BPF_PROG_TYPE_SCHED_CLS: 8997 case BPF_PROG_TYPE_SCHED_ACT: 8998 return true; 8999 default: 9000 return false; 9001 } 9002 } 9003 9004 /* verify safety of LD_ABS|LD_IND instructions: 9005 * - they can only appear in the programs where ctx == skb 9006 * - since they are wrappers of function calls, they
scratch R1-R5 registers, 9007 * preserve R6-R9, and store return value into R0 9008 * 9009 * Implicit input: 9010 * ctx == skb == R6 == CTX 9011 * 9012 * Explicit input: 9013 * SRC == any register 9014 * IMM == 32-bit immediate 9015 * 9016 * Output: 9017 * R0 - 8/16/32-bit skb data converted to cpu endianness 9018 */ 9019 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 9020 { 9021 struct bpf_reg_state *regs = cur_regs(env); 9022 static const int ctx_reg = BPF_REG_6; 9023 u8 mode = BPF_MODE(insn->code); 9024 int i, err; 9025 9026 if (!may_access_skb(resolve_prog_type(env->prog))) { 9027 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 9028 return -EINVAL; 9029 } 9030 9031 if (!env->ops->gen_ld_abs) { 9032 verbose(env, "bpf verifier is misconfigured\n"); 9033 return -EINVAL; 9034 } 9035 9036 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 9037 BPF_SIZE(insn->code) == BPF_DW || 9038 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 9039 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 9040 return -EINVAL; 9041 } 9042 9043 /* check whether implicit source operand (register R6) is readable */ 9044 err = check_reg_arg(env, ctx_reg, SRC_OP); 9045 if (err) 9046 return err; 9047 9048 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 9049 * gen_ld_abs() may terminate the program at runtime, leading to 9050 * reference leak. 9051 */ 9052 err = check_reference_leak(env); 9053 if (err) { 9054 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 9055 return err; 9056 } 9057 9058 if (env->cur_state->active_spin_lock) { 9059 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 9060 return -EINVAL; 9061 } 9062 9063 if (regs[ctx_reg].type != PTR_TO_CTX) { 9064 verbose(env, 9065 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 9066 return -EINVAL; 9067 } 9068 9069 if (mode == BPF_IND) { 9070 /* check explicit source operand */ 9071 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9072 if (err) 9073 return err; 9074 } 9075 9076 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); 9077 if (err < 0) 9078 return err; 9079 9080 /* reset caller saved regs to unreadable */ 9081 for (i = 0; i < CALLER_SAVED_REGS; i++) { 9082 mark_reg_not_init(env, regs, caller_saved[i]); 9083 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 9084 } 9085 9086 /* mark destination R0 register as readable, since it contains 9087 * the value fetched from the packet. 9088 * Already marked as written above. 9089 */ 9090 mark_reg_unknown(env, regs, BPF_REG_0); 9091 /* ld_abs loads up to 32 bits of skb data. */ 9092 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 9093 return 0; 9094 } 9095 9096 static int check_return_code(struct bpf_verifier_env *env) 9097 { 9098 struct tnum enforce_attach_type_range = tnum_unknown; 9099 const struct bpf_prog *prog = env->prog; 9100 struct bpf_reg_state *reg; 9101 struct tnum range = tnum_range(0, 1); 9102 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 9103 int err; 9104 const bool is_subprog = env->cur_state->frame[0]->subprogno; 9105 9106 /* LSM and struct_ops func-ptr's return type could be "void" */ 9107 if (!is_subprog && 9108 (prog_type == BPF_PROG_TYPE_STRUCT_OPS || 9109 prog_type == BPF_PROG_TYPE_LSM) && 9110 !prog->aux->attach_func_proto->type) 9111 return 0; 9112 9113 /* eBPF calling convention is such that R0 is used 9114 * to return the value from the eBPF program.
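* E.g. a BPF_CGROUP_INET_EGRESS program must exit with R0 in [0, 3]; the tnum_in() check at the end enforces the per-program-type range.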
9115 * Make sure that it's readable at this time 9116 * of bpf_exit, which means that program wrote 9117 * something into it earlier 9118 */ 9119 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 9120 if (err) 9121 return err; 9122 9123 if (is_pointer_value(env, BPF_REG_0)) { 9124 verbose(env, "R0 leaks addr as return value\n"); 9125 return -EACCES; 9126 } 9127 9128 reg = cur_regs(env) + BPF_REG_0; 9129 if (is_subprog) { 9130 if (reg->type != SCALAR_VALUE) { 9131 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", 9132 reg_type_str[reg->type]); 9133 return -EINVAL; 9134 } 9135 return 0; 9136 } 9137 9138 switch (prog_type) { 9139 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 9140 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 9141 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 9142 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 9143 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 9144 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 9145 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) 9146 range = tnum_range(1, 1); 9147 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || 9148 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) 9149 range = tnum_range(0, 3); 9150 break; 9151 case BPF_PROG_TYPE_CGROUP_SKB: 9152 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 9153 range = tnum_range(0, 3); 9154 enforce_attach_type_range = tnum_range(2, 3); 9155 } 9156 break; 9157 case BPF_PROG_TYPE_CGROUP_SOCK: 9158 case BPF_PROG_TYPE_SOCK_OPS: 9159 case BPF_PROG_TYPE_CGROUP_DEVICE: 9160 case BPF_PROG_TYPE_CGROUP_SYSCTL: 9161 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 9162 break; 9163 case BPF_PROG_TYPE_RAW_TRACEPOINT: 9164 if (!env->prog->aux->attach_btf_id) 9165 return 0; 9166 range = tnum_const(0); 9167 break; 9168 case BPF_PROG_TYPE_TRACING: 9169 switch (env->prog->expected_attach_type) { 9170 case BPF_TRACE_FENTRY: 9171 case BPF_TRACE_FEXIT: 9172 range = tnum_const(0); 9173 break; 9174 case BPF_TRACE_RAW_TP: 9175 case BPF_MODIFY_RETURN: 9176 return 0; 9177 case BPF_TRACE_ITER: 9178 break; 9179 default: 9180 return -ENOTSUPP; 9181 } 9182 break; 9183 case BPF_PROG_TYPE_SK_LOOKUP: 9184 range = tnum_range(SK_DROP, SK_PASS); 9185 break; 9186 case BPF_PROG_TYPE_EXT: 9187 /* freplace program can return anything as its return value 9188 * depends on the to-be-replaced kernel func or bpf program. 
9189 */ 9190 default: 9191 return 0; 9192 } 9193 9194 if (reg->type != SCALAR_VALUE) { 9195 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 9196 reg_type_str[reg->type]); 9197 return -EINVAL; 9198 } 9199 9200 if (!tnum_in(range, reg->var_off)) { 9201 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); 9202 return -EINVAL; 9203 } 9204 9205 if (!tnum_is_unknown(enforce_attach_type_range) && 9206 tnum_in(enforce_attach_type_range, reg->var_off)) 9207 env->prog->enforce_expected_attach_type = 1; 9208 return 0; 9209 } 9210 9211 /* non-recursive DFS pseudo code 9212 * 1 procedure DFS-iterative(G,v): 9213 * 2 label v as discovered 9214 * 3 let S be a stack 9215 * 4 S.push(v) 9216 * 5 while S is not empty 9217 * 6 t <- S.pop() 9218 * 7 if t is what we're looking for: 9219 * 8 return t 9220 * 9 for all edges e in G.adjacentEdges(t) do 9221 * 10 if edge e is already labelled 9222 * 11 continue with the next edge 9223 * 12 w <- G.adjacentVertex(t,e) 9224 * 13 if vertex w is not discovered and not explored 9225 * 14 label e as tree-edge 9226 * 15 label w as discovered 9227 * 16 S.push(w) 9228 * 17 continue at 5 9229 * 18 else if vertex w is discovered 9230 * 19 label e as back-edge 9231 * 20 else 9232 * 21 // vertex w is explored 9233 * 22 label e as forward- or cross-edge 9234 * 23 label t as explored 9235 * 24 S.pop() 9236 * 9237 * convention: 9238 * 0x10 - discovered 9239 * 0x11 - discovered and fall-through edge labelled 9240 * 0x12 - discovered and fall-through and branch edges labelled 9241 * 0x20 - explored 9242 */ 9243 9244 enum { 9245 DISCOVERED = 0x10, 9246 EXPLORED = 0x20, 9247 FALLTHROUGH = 1, 9248 BRANCH = 2, 9249 }; 9250 9251 static u32 state_htab_size(struct bpf_verifier_env *env) 9252 { 9253 return env->prog->len; 9254 } 9255 9256 static struct bpf_verifier_state_list **explored_state( 9257 struct bpf_verifier_env *env, 9258 int idx) 9259 { 9260 struct bpf_verifier_state *cur = env->cur_state; 9261 struct bpf_func_state *state = cur->frame[cur->curframe]; 9262 9263 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 9264 } 9265 9266 static void init_explored_state(struct bpf_verifier_env *env, int idx) 9267 { 9268 env->insn_aux_data[idx].prune_point = true; 9269 } 9270 9271 enum { 9272 DONE_EXPLORING = 0, 9273 KEEP_EXPLORING = 1, 9274 }; 9275 9276 /* t, w, e - match pseudo-code above: 9277 * t - index of current instruction 9278 * w - next instruction 9279 * e - edge 9280 */ 9281 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 9282 bool loop_ok) 9283 { 9284 int *insn_stack = env->cfg.insn_stack; 9285 int *insn_state = env->cfg.insn_state; 9286 9287 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 9288 return DONE_EXPLORING; 9289 9290 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 9291 return DONE_EXPLORING; 9292 9293 if (w < 0 || w >= env->prog->len) { 9294 verbose_linfo(env, t, "%d: ", t); 9295 verbose(env, "jump out of range from insn %d to %d\n", t, w); 9296 return -EINVAL; 9297 } 9298 9299 if (e == BRANCH) 9300 /* mark branch target for state pruning */ 9301 init_explored_state(env, w); 9302 9303 if (insn_state[w] == 0) { 9304 /* tree-edge */ 9305 insn_state[t] = DISCOVERED | e; 9306 insn_state[w] = DISCOVERED; 9307 if (env->cfg.cur_stack >= env->prog->len) 9308 return -E2BIG; 9309 insn_stack[env->cfg.cur_stack++] = w; 9310 return KEEP_EXPLORING; 9311 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 9312 if (loop_ok && env->bpf_capable) 9313 return 
DONE_EXPLORING; 9314 verbose_linfo(env, t, "%d: ", t); 9315 verbose_linfo(env, w, "%d: ", w); 9316 verbose(env, "back-edge from insn %d to %d\n", t, w); 9317 return -EINVAL; 9318 } else if (insn_state[w] == EXPLORED) { 9319 /* forward- or cross-edge */ 9320 insn_state[t] = DISCOVERED | e; 9321 } else { 9322 verbose(env, "insn state internal bug\n"); 9323 return -EFAULT; 9324 } 9325 return DONE_EXPLORING; 9326 } 9327 9328 static int visit_func_call_insn(int t, int insn_cnt, 9329 struct bpf_insn *insns, 9330 struct bpf_verifier_env *env, 9331 bool visit_callee) 9332 { 9333 int ret; 9334 9335 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 9336 if (ret) 9337 return ret; 9338 9339 if (t + 1 < insn_cnt) 9340 init_explored_state(env, t + 1); 9341 if (visit_callee) { 9342 init_explored_state(env, t); 9343 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, 9344 env, false); 9345 } 9346 return ret; 9347 } 9348 9349 /* Visits the instruction at index t and returns one of the following: 9350 * < 0 - an error occurred 9351 * DONE_EXPLORING - the instruction was fully explored 9352 * KEEP_EXPLORING - there is still work to be done before it is fully explored 9353 */ 9354 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env) 9355 { 9356 struct bpf_insn *insns = env->prog->insnsi; 9357 int ret; 9358 9359 if (bpf_pseudo_func(insns + t)) 9360 return visit_func_call_insn(t, insn_cnt, insns, env, true); 9361 9362 /* All non-branch instructions have a single fall-through edge. */ 9363 if (BPF_CLASS(insns[t].code) != BPF_JMP && 9364 BPF_CLASS(insns[t].code) != BPF_JMP32) 9365 return push_insn(t, t + 1, FALLTHROUGH, env, false); 9366 9367 switch (BPF_OP(insns[t].code)) { 9368 case BPF_EXIT: 9369 return DONE_EXPLORING; 9370 9371 case BPF_CALL: 9372 return visit_func_call_insn(t, insn_cnt, insns, env, 9373 insns[t].src_reg == BPF_PSEUDO_CALL); 9374 9375 case BPF_JA: 9376 if (BPF_SRC(insns[t].code) != BPF_K) 9377 return -EINVAL; 9378 9379 /* unconditional jump with single edge */ 9380 ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, 9381 true); 9382 if (ret) 9383 return ret; 9384 9385 /* unconditional jmp is not a good pruning point, 9386 * but it's marked, since backtracking needs 9387 * to record jmp history in is_state_visited(). 
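
/* Example (sketch): when env->bpf_capable is not set, the DFS above
 * rejects any cycle, e.g.
 *	0: r1 = 0
 *	1: r1 += 1
 *	2: if r1 < 10 goto pc-2	// branch edge back to insn 1
 *	3: exit
 * visiting the branch target of insn 2 finds insn 1 still DISCOVERED,
 * so push_insn() reports "back-edge from insn 2 to 1".
 */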
9388 */ 9389 init_explored_state(env, t + insns[t].off + 1); 9390 /* tell verifier to check for equivalent states 9391 * after every call and jump 9392 */ 9393 if (t + 1 < insn_cnt) 9394 init_explored_state(env, t + 1); 9395 9396 return ret; 9397 9398 default: 9399 /* conditional jump with two edges */ 9400 init_explored_state(env, t); 9401 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 9402 if (ret) 9403 return ret; 9404 9405 return push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 9406 } 9407 } 9408 9409 /* non-recursive depth-first-search to detect loops in BPF program 9410 * loop == back-edge in directed graph 9411 */ 9412 static int check_cfg(struct bpf_verifier_env *env) 9413 { 9414 int insn_cnt = env->prog->len; 9415 int *insn_stack, *insn_state; 9416 int ret = 0; 9417 int i; 9418 9419 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 9420 if (!insn_state) 9421 return -ENOMEM; 9422 9423 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 9424 if (!insn_stack) { 9425 kvfree(insn_state); 9426 return -ENOMEM; 9427 } 9428 9429 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 9430 insn_stack[0] = 0; /* 0 is the first instruction */ 9431 env->cfg.cur_stack = 1; 9432 9433 while (env->cfg.cur_stack > 0) { 9434 int t = insn_stack[env->cfg.cur_stack - 1]; 9435 9436 ret = visit_insn(t, insn_cnt, env); 9437 switch (ret) { 9438 case DONE_EXPLORING: 9439 insn_state[t] = EXPLORED; 9440 env->cfg.cur_stack--; 9441 break; 9442 case KEEP_EXPLORING: 9443 break; 9444 default: 9445 if (ret > 0) { 9446 verbose(env, "visit_insn internal bug\n"); 9447 ret = -EFAULT; 9448 } 9449 goto err_free; 9450 } 9451 } 9452 9453 if (env->cfg.cur_stack < 0) { 9454 verbose(env, "pop stack internal bug\n"); 9455 ret = -EFAULT; 9456 goto err_free; 9457 } 9458 9459 for (i = 0; i < insn_cnt; i++) { 9460 if (insn_state[i] != EXPLORED) { 9461 verbose(env, "unreachable insn %d\n", i); 9462 ret = -EINVAL; 9463 goto err_free; 9464 } 9465 } 9466 ret = 0; /* cfg looks good */ 9467 9468 err_free: 9469 kvfree(insn_state); 9470 kvfree(insn_stack); 9471 env->cfg.insn_state = env->cfg.insn_stack = NULL; 9472 return ret; 9473 } 9474 9475 static int check_abnormal_return(struct bpf_verifier_env *env) 9476 { 9477 int i; 9478 9479 for (i = 1; i < env->subprog_cnt; i++) { 9480 if (env->subprog_info[i].has_ld_abs) { 9481 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 9482 return -EINVAL; 9483 } 9484 if (env->subprog_info[i].has_tail_call) { 9485 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 9486 return -EINVAL; 9487 } 9488 } 9489 return 0; 9490 } 9491 9492 /* The minimum supported BTF func info size */ 9493 #define MIN_BPF_FUNCINFO_SIZE 8 9494 #define MAX_FUNCINFO_REC_SIZE 252 9495 9496 static int check_btf_func(struct bpf_verifier_env *env, 9497 const union bpf_attr *attr, 9498 bpfptr_t uattr) 9499 { 9500 const struct btf_type *type, *func_proto, *ret_type; 9501 u32 i, nfuncs, urec_size, min_size; 9502 u32 krec_size = sizeof(struct bpf_func_info); 9503 struct bpf_func_info *krecord; 9504 struct bpf_func_info_aux *info_aux = NULL; 9505 struct bpf_prog *prog; 9506 const struct btf *btf; 9507 bpfptr_t urecord; 9508 u32 prev_offset = 0; 9509 bool scalar_return; 9510 int ret = -ENOMEM; 9511 9512 nfuncs = attr->func_info_cnt; 9513 if (!nfuncs) { 9514 if (check_abnormal_return(env)) 9515 return -EINVAL; 9516 return 0; 9517 } 9518 9519 if (nfuncs != env->subprog_cnt) { 9520 verbose(env, "number of funcs in func_info doesn't match 

static void adjust_btf_func(struct bpf_verifier_env *env)
{
	struct bpf_prog_aux *aux = env->prog->aux;
	int i;

	if (!aux->func_info)
		return;

	for (i = 0; i < env->subprog_cnt; i++)
		aux->func_info[i].insn_off = env->subprog_info[i].start;
}

#define MIN_BPF_LINEINFO_SIZE	(offsetof(struct bpf_line_info, line_col) + \
		sizeof(((struct bpf_line_info *)(0))->line_col))
#define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE

static int check_btf_line(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{
	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
	struct bpf_subprog_info *sub;
	struct bpf_line_info *linfo;
	struct bpf_prog *prog;
	const struct btf *btf;
	bpfptr_t ulinfo;
	int err;

	nr_linfo = attr->line_info_cnt;
	if (!nr_linfo)
		return 0;

	rec_size = attr->line_info_rec_size;
	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
	    rec_size > MAX_LINEINFO_REC_SIZE ||
	    rec_size & (sizeof(u32) - 1))
		return -EINVAL;

	/* Need to zero it in case userspace passes in a smaller
	 * bpf_line_info object.
	 */
	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
			 GFP_KERNEL | __GFP_NOWARN);
	if (!linfo)
		return -ENOMEM;

	prog = env->prog;
	btf = prog->aux->btf;

	s = 0;
	sub = env->subprog_info;
	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
	expected_size = sizeof(struct bpf_line_info);
	ncopy = min_t(u32, expected_size, rec_size);
	for (i = 0; i < nr_linfo; i++) {
		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
		if (err) {
			if (err == -E2BIG) {
				verbose(env, "nonzero trailing record in line_info");
				if (copy_to_bpfptr_offset(uattr,
							  offsetof(union bpf_attr, line_info_rec_size),
							  &expected_size, sizeof(expected_size)))
					err = -EFAULT;
			}
			goto err_free;
		}

		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
			err = -EFAULT;
			goto err_free;
		}

		/*
		 * Check insn_off to ensure
		 * 1) strictly increasing AND
		 * 2) bounded by prog->len
		 *
		 * The linfo[0].insn_off == 0 check logically falls into
		 * the later "missing bpf_line_info for func..." case
		 * because the first linfo[0].insn_off must be the
		 * first sub also and the first sub must have
		 * subprog_info[0].start == 0.
		 */
		if ((i && linfo[i].insn_off <= prev_offset) ||
		    linfo[i].insn_off >= prog->len) {
			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
				i, linfo[i].insn_off, prev_offset,
				prog->len);
			err = -EINVAL;
			goto err_free;
		}

		if (!prog->insnsi[linfo[i].insn_off].code) {
			verbose(env,
				"Invalid insn code at line_info[%u].insn_off\n",
				i);
			err = -EINVAL;
			goto err_free;
		}

		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
			err = -EINVAL;
			goto err_free;
		}

		if (s != env->subprog_cnt) {
			if (linfo[i].insn_off == sub[s].start) {
				sub[s].linfo_idx = i;
				s++;
			} else if (sub[s].start < linfo[i].insn_off) {
				verbose(env, "missing bpf_line_info for func#%u\n", s);
				err = -EINVAL;
				goto err_free;
			}
		}

		prev_offset = linfo[i].insn_off;
		bpfptr_add(&ulinfo, rec_size);
	}

	if (s != env->subprog_cnt) {
		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
			env->subprog_cnt - s, s);
		err = -EINVAL;
		goto err_free;
	}

	prog->aux->linfo = linfo;
	prog->aux->nr_linfo = nr_linfo;

	return 0;

err_free:
	kvfree(linfo);
	return err;
}
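
/* Example (sketch): with two subprograms starting at insns 0 and 5, a
 * valid line_info array could be
 *	{ .insn_off = 0, ... },	// covers func#0, sub[0].linfo_idx = 0
 *	{ .insn_off = 3, ... },
 *	{ .insn_off = 5, ... },	// covers func#1, sub[1].linfo_idx = 2
 * insn_off strictly increasing, below prog->len, and with one record
 * landing exactly on every subprogram start.
 */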

static int check_btf_info(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{
	struct btf *btf;
	int err;

	if (!attr->func_info_cnt && !attr->line_info_cnt) {
		if (check_abnormal_return(env))
			return -EINVAL;
		return 0;
	}

	btf = btf_get_by_fd(attr->prog_btf_fd);
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (btf_is_kernel(btf)) {
		btf_put(btf);
		return -EACCES;
	}
	env->prog->aux->btf = btf;

	err = check_btf_func(env, attr, uattr);
	if (err)
		return err;

	err = check_btf_line(env, attr, uattr);
	if (err)
		return err;

	return 0;
}

/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
			 struct bpf_reg_state *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value &&
	       old->u32_min_value <= cur->u32_min_value &&
	       old->u32_max_value >= cur->u32_max_value &&
	       old->s32_min_value <= cur->s32_min_value &&
	       old->s32_max_value >= cur->s32_max_value;
}

/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well.  But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe.  But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before.  If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
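
/* Example: if the old state had r1.id == r2.id == 5 and the current state
 * maps r1 to id 9, check_ids() records (5 -> 9); a later lookup of old id
 * 5 against a current id other than 9 then fails, keeping NULL-check
 * relationships between registers intact across the two states.
 */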
9709 */ 9710 if ((i && linfo[i].insn_off <= prev_offset) || 9711 linfo[i].insn_off >= prog->len) { 9712 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 9713 i, linfo[i].insn_off, prev_offset, 9714 prog->len); 9715 err = -EINVAL; 9716 goto err_free; 9717 } 9718 9719 if (!prog->insnsi[linfo[i].insn_off].code) { 9720 verbose(env, 9721 "Invalid insn code at line_info[%u].insn_off\n", 9722 i); 9723 err = -EINVAL; 9724 goto err_free; 9725 } 9726 9727 if (!btf_name_by_offset(btf, linfo[i].line_off) || 9728 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 9729 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 9730 err = -EINVAL; 9731 goto err_free; 9732 } 9733 9734 if (s != env->subprog_cnt) { 9735 if (linfo[i].insn_off == sub[s].start) { 9736 sub[s].linfo_idx = i; 9737 s++; 9738 } else if (sub[s].start < linfo[i].insn_off) { 9739 verbose(env, "missing bpf_line_info for func#%u\n", s); 9740 err = -EINVAL; 9741 goto err_free; 9742 } 9743 } 9744 9745 prev_offset = linfo[i].insn_off; 9746 bpfptr_add(&ulinfo, rec_size); 9747 } 9748 9749 if (s != env->subprog_cnt) { 9750 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 9751 env->subprog_cnt - s, s); 9752 err = -EINVAL; 9753 goto err_free; 9754 } 9755 9756 prog->aux->linfo = linfo; 9757 prog->aux->nr_linfo = nr_linfo; 9758 9759 return 0; 9760 9761 err_free: 9762 kvfree(linfo); 9763 return err; 9764 } 9765 9766 static int check_btf_info(struct bpf_verifier_env *env, 9767 const union bpf_attr *attr, 9768 bpfptr_t uattr) 9769 { 9770 struct btf *btf; 9771 int err; 9772 9773 if (!attr->func_info_cnt && !attr->line_info_cnt) { 9774 if (check_abnormal_return(env)) 9775 return -EINVAL; 9776 return 0; 9777 } 9778 9779 btf = btf_get_by_fd(attr->prog_btf_fd); 9780 if (IS_ERR(btf)) 9781 return PTR_ERR(btf); 9782 if (btf_is_kernel(btf)) { 9783 btf_put(btf); 9784 return -EACCES; 9785 } 9786 env->prog->aux->btf = btf; 9787 9788 err = check_btf_func(env, attr, uattr); 9789 if (err) 9790 return err; 9791 9792 err = check_btf_line(env, attr, uattr); 9793 if (err) 9794 return err; 9795 9796 return 0; 9797 } 9798 9799 /* check %cur's range satisfies %old's */ 9800 static bool range_within(struct bpf_reg_state *old, 9801 struct bpf_reg_state *cur) 9802 { 9803 return old->umin_value <= cur->umin_value && 9804 old->umax_value >= cur->umax_value && 9805 old->smin_value <= cur->smin_value && 9806 old->smax_value >= cur->smax_value && 9807 old->u32_min_value <= cur->u32_min_value && 9808 old->u32_max_value >= cur->u32_max_value && 9809 old->s32_min_value <= cur->s32_min_value && 9810 old->s32_max_value >= cur->s32_max_value; 9811 } 9812 9813 /* If in the old state two registers had the same id, then they need to have 9814 * the same id in the new state as well. But that id could be different from 9815 * the old state, so we need to track the mapping from old to new ids. 9816 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 9817 * regs with old id 5 must also have new id 9 for the new state to be safe. But 9818 * regs with a different old id could still have new id 9, we don't care about 9819 * that. 9820 * So we look through our idmap to see if this old id has been seen before. If 9821 * so, we require the new id to match; otherwise, we add the id pair to the map. 
9822 */ 9823 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) 9824 { 9825 unsigned int i; 9826 9827 for (i = 0; i < BPF_ID_MAP_SIZE; i++) { 9828 if (!idmap[i].old) { 9829 /* Reached an empty slot; haven't seen this id before */ 9830 idmap[i].old = old_id; 9831 idmap[i].cur = cur_id; 9832 return true; 9833 } 9834 if (idmap[i].old == old_id) 9835 return idmap[i].cur == cur_id; 9836 } 9837 /* We ran out of idmap slots, which should be impossible */ 9838 WARN_ON_ONCE(1); 9839 return false; 9840 } 9841 9842 static void clean_func_state(struct bpf_verifier_env *env, 9843 struct bpf_func_state *st) 9844 { 9845 enum bpf_reg_liveness live; 9846 int i, j; 9847 9848 for (i = 0; i < BPF_REG_FP; i++) { 9849 live = st->regs[i].live; 9850 /* liveness must not touch this register anymore */ 9851 st->regs[i].live |= REG_LIVE_DONE; 9852 if (!(live & REG_LIVE_READ)) 9853 /* since the register is unused, clear its state 9854 * to make further comparison simpler 9855 */ 9856 __mark_reg_not_init(env, &st->regs[i]); 9857 } 9858 9859 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 9860 live = st->stack[i].spilled_ptr.live; 9861 /* liveness must not touch this stack slot anymore */ 9862 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 9863 if (!(live & REG_LIVE_READ)) { 9864 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 9865 for (j = 0; j < BPF_REG_SIZE; j++) 9866 st->stack[i].slot_type[j] = STACK_INVALID; 9867 } 9868 } 9869 } 9870 9871 static void clean_verifier_state(struct bpf_verifier_env *env, 9872 struct bpf_verifier_state *st) 9873 { 9874 int i; 9875 9876 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 9877 /* all regs in this state in all frames were already marked */ 9878 return; 9879 9880 for (i = 0; i <= st->curframe; i++) 9881 clean_func_state(env, st->frame[i]); 9882 } 9883 9884 /* the parentage chains form a tree. 9885 * the verifier states are added to state lists at given insn and 9886 * pushed into state stack for future exploration. 9887 * when the verifier reaches bpf_exit insn some of the verifer states 9888 * stored in the state lists have their final liveness state already, 9889 * but a lot of states will get revised from liveness point of view when 9890 * the verifier explores other branches. 9891 * Example: 9892 * 1: r0 = 1 9893 * 2: if r1 == 100 goto pc+1 9894 * 3: r0 = 2 9895 * 4: exit 9896 * when the verifier reaches exit insn the register r0 in the state list of 9897 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 9898 * of insn 2 and goes exploring further. At the insn 4 it will walk the 9899 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 9900 * 9901 * Since the verifier pushes the branch states as it sees them while exploring 9902 * the program the condition of walking the branch instruction for the second 9903 * time means that all states below this branch were already explored and 9904 * their final liveness marks are already propagated. 9905 * Hence when the verifier completes the search of state list in is_state_visited() 9906 * we can call this clean_live_states() function to mark all liveness states 9907 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 9908 * will not be used. 9909 * This function also clears the registers and stack for states that !READ 9910 * to simplify state merging. 9911 * 9912 * Important note here that walking the same branch instruction in the callee 9913 * doesn't meant that the states are DONE. 

static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct bpf_id_pair *idmap)
{
	int i, spi;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
			i += BPF_REG_SIZE - 1;
			/* explored state didn't use this */
			continue;
		}

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;

		/* explored stack has more populated slots than current stack
		 * and these slots were used
		 */
		if (i >= cur->allocated_stack)
			return false;

		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}
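
/* Example: an explored state with fp-8 marked STACK_MISC is reusable when
 * the current state has fp-8 as STACK_ZERO, since zeros are one instance
 * of "some scalar bytes"; the reverse does not hold, because code proven
 * safe under fp-8 == 0 may misbehave on arbitrary bytes.
 */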

static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
	if (old->acquired_refs != cur->acquired_refs)
		return false;
	return !memcmp(old->refs, cur->refs,
		       sizeof(*old->refs) * old->acquired_refs);
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
			      struct bpf_func_state *cur)
{
	int i;

	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
	for (i = 0; i < MAX_BPF_REG; i++)
		if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
			return false;

	if (!stacksafe(old, cur, env->idmap_scratch))
		return false;

	if (!refsafe(old, cur))
		return false;

	return true;
}

static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)
		return false;

	if (old->active_spin_lock != cur->active_spin_lock)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}
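
/* Example: two states at the same insn that were reached through
 * different call sites (frame[i]->callsite differs) are never treated
 * as equivalent, since identical register/stack contents still return
 * to different callers.
 */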

/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{
	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
	u8 flag = reg->live & REG_LIVE_READ;
	int err;

	/* When we get here, the read flags of PARENT_REG or REG could be any
	 * of REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no
	 * need of propagation if PARENT_REG has the strongest REG_LIVE_READ64.
	 */
	if (parent_flag == REG_LIVE_READ64 ||
	    /* Or if there is no read flag from REG. */
	    !flag ||
	    /* Or if the read flag from REG is the same as PARENT_REG. */
	    parent_flag == flag)
		return 0;

	err = mark_reg_read(env, reg, parent_reg, flag);
	if (err)
		return err;

	return flag;
}

/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{
	struct bpf_reg_state *state_reg, *parent_reg;
	struct bpf_func_state *state, *parent;
	int i, frame, err = 0;

	if (vparent->curframe != vstate->curframe) {
		WARN(1, "propagate_live: parent frame %d current frame %d\n",
		     vparent->curframe, vstate->curframe);
		return -EFAULT;
	}
	/* Propagate read liveness of registers... */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	for (frame = 0; frame <= vstate->curframe; frame++) {
		parent = vparent->frame[frame];
		state = vstate->frame[frame];
		parent_reg = parent->regs;
		state_reg = state->regs;
		/* We don't need to worry about FP liveness, it's read-only */
		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
			err = propagate_liveness_reg(env, &state_reg[i],
						     &parent_reg[i]);
			if (err < 0)
				return err;
			if (err == REG_LIVE_READ64)
				mark_insn_zext(env, &parent_reg[i]);
		}

		/* Propagate stack slots. */
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
			parent_reg = &parent->stack[i].spilled_ptr;
			state_reg = &state->stack[i].spilled_ptr;
			err = propagate_liveness_reg(env, state_reg,
						     parent_reg);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;
	int i, err = 0;

	state = old->frame[old->curframe];
	state_reg = state->regs;
	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
			continue;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating r%d\n", i);
		err = mark_chain_precision(env, i);
		if (err < 0)
			return err;
	}

	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		state_reg = &state->stack[i].spilled_ptr;
		if (state_reg->type != SCALAR_VALUE ||
		    !state_reg->precise)
			continue;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "propagating fp%d\n",
				(-i - 1) * BPF_REG_SIZE);
		err = mark_chain_precision_stack(env, i);
		if (err < 0)
			return err;
	}
	return 0;
}
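
/* Example: if the matching explored state reached bpf_exit with r6 marked
 * precise (its exact value mattered for a later check), the current
 * pruned path re-runs mark_chain_precision() for r6 so that its own jump
 * history records r6 as precise too.
 */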

static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int i, fr = cur->curframe;

	if (old->curframe != fr)
		return false;

	fold = old->frame[fr];
	fcur = cur->frame[fr];
	for (i = 0; i < MAX_BPF_REG; i++)
		if (memcmp(&fold->regs[i], &fcur->regs[i],
			   offsetof(struct bpf_reg_state, parent)))
			return false;
	return true;
}

static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl, **pprev;
	struct bpf_verifier_state *cur = env->cur_state, *new;
	int i, j, err, states_cnt = 0;
	bool add_new_state = env->test_state_freq ? true : false;

	cur->last_insn_idx = env->prev_insn_idx;
	if (!env->insn_aux_data[insn_idx].prune_point)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	/* bpf progs typically have pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
	 * In tests that amounts to up to 50% reduction into total verifier
	 * memory consumption and 20% verifier time speedup.
	 */
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	pprev = explored_state(env, insn_idx);
	sl = *pprev;

	clean_live_states(env, insn_idx, cur);

	while (sl) {
		states_cnt++;
		if (sl->state.insn_idx != insn_idx)
			goto next;
		if (sl->state.branches) {
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur)) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
				return -EINVAL;
			}
			/* if the verifier is processing a loop, avoid adding new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low to make sure that
			 * a loop with large bound will be rejected quickly.
			 * The most abusive loop will be:
			 * r1 += 1
			 * if r1 < 1000000 goto pc-2
			 * 1M insn_processed limit / 100 == 10k peak states.
			 * This threshold shouldn't be too high either, since states
			 * at the end of the loop are likely to be useful in pruning.
			 */
			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;
			goto miss;
		}
		if (states_equal(env, &sl->state, cur)) {
			sl->hit_cnt++;
			/* reached equivalent register/stack state,
			 * prune the search.
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state). Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
			 * current state is equivalent to it (except precision marks)
			 * the precision needs to be propagated back in
			 * the current state.
			 */
			err = err ? : push_jmp_history(env, cur);
			err = err ? : propagate_precision(env, &sl->state);
			if (err)
				return err;
			return 1;
		}
miss:
		/* when new state is not going to be added do not increase miss count.
		 * Otherwise several loop iterations will remove the state
		 * recorded earlier. The goal of these heuristics is to have
		 * states from some iterations of the loop (some in the beginning
		 * and some at the end) to help pruning.
		 */
		if (add_new_state)
			sl->miss_cnt++;
		/* heuristic to determine whether this state is beneficial
		 * to keep checking from state equivalence point of view.
		 * Higher numbers increase max_states_per_insn and verification time,
		 * but do not meaningfully decrease insn_processed.
		 */
		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
			/* the state is unlikely to be useful. Remove it to
			 * speed up verification
			 */
			*pprev = sl->next;
			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
				u32 br = sl->state.branches;

				WARN_ONCE(br,
					  "BUG live_done but branches_to_explore %d\n",
					  br);
				free_verifier_state(&sl->state, false);
				kfree(sl);
				env->peak_states--;
			} else {
				/* cannot free this state, since parentage chain may
				 * walk it later. Add it for free_list instead to
				 * be freed at the end of verification
				 */
				sl->next = env->free_list;
				env->free_list = sl;
			}
			sl = *pprev;
			continue;
		}
next:
		pprev = &sl->next;
		sl = *pprev;
	}

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return push_jmp_history(env, cur);

	if (!add_new_state)
		return push_jmp_history(env, cur);

	/* There were no equivalent states, remember the current one.
	 * Technically the current state is not proven to be safe yet,
	 * but it will either reach outer most bpf_exit (which means it's safe)
	 * or it will be rejected. When there are no loops the verifier won't be
	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
	 * again on the way to bpf_exit.
	 * When looping the sl->state.branches will be > 0 and this state
	 * will not be considered for equivalence until branches == 0.
	 */
	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
	if (!new_sl)
		return -ENOMEM;
	env->total_states++;
	env->peak_states++;
	env->prev_jmps_processed = env->jmps_processed;
	env->prev_insn_processed = env->insn_processed;

	/* add new state to the head of linked list */
	new = &new_sl->state;
	err = copy_verifier_state(new, cur);
	if (err) {
		free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}
	new->insn_idx = insn_idx;
	WARN_ONCE(new->branches != 1,
		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);

	cur->parent = new;
	cur->first_insn_idx = insn_idx;
	clear_jmp_history(cur);
	new_sl->next = *explored_state(env, insn_idx);
	*explored_state(env, insn_idx) = new_sl;
	/* connect new state to parentage chain. Current frame needs all
	 * registers connected. Only r6 - r9 of the callers are alive (pushed
	 * to the stack implicitly by JITs) so in callers' frames connect just
	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
	 * the state of the call instruction (with WRITTEN set), and r0 comes
	 * from callee with its full parentage chain, anyway.
	 */
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
	 * their parent and current state never has children yet.  Only
	 * explored_states can get read marks.)
	 */
	for (j = 0; j <= cur->curframe; j++) {
		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
		for (i = 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
	}

	/* all stack frames are accessible from callee, clear them all */
	for (j = 0; j <= cur->curframe; j++) {
		struct bpf_func_state *frame = cur->frame[j];
		struct bpf_func_state *newframe = new->frame[j];

		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
			frame->stack[i].spilled_ptr.parent =
						&newframe->stack[i].spilled_ptr;
		}
	}
	return 0;
}
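
/* Example of a pruning hit: after "if r1 > 10 goto +2" both paths can
 * reach the same prune point; if the explored state there was verified
 * with a wider r1 range than the current path proves, states_equal()
 * succeeds and the current path is cut short ("safe" in the log) instead
 * of being walked again to bpf_exit.
 */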

/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_CTX:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_SOCK_COMMON_OR_NULL:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_TCP_SOCK_OR_NULL:
	case PTR_TO_XDP_SOCK:
	case PTR_TO_BTF_ID:
	case PTR_TO_BTF_ID_OR_NULL:
		return false;
	default:
		return true;
	}
}

/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the below, where it may be ok
 * for one branch accessing the pointer, but not ok for the other branch:
 *
 *          R1 = sock_ptr
 *          goto X;
 *          ...
 *          R1 = some_other_valid_ptr;
 *          goto X;
 *          ...
 *          R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
	return src != prev && (!reg_type_mismatch_ok(src) ||
			       !reg_type_mismatch_ok(prev));
}

static int do_check(struct bpf_verifier_env *env)
{
	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len;
	bool do_print_state = false;
	int prev_insn_idx = -1;

	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		env->prev_insn_idx = prev_insn_idx;
		if (env->insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				env->insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[env->insn_idx];
		class = BPF_CLASS(insn->code);

		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose(env,
				"BPF program is too large. Processed %d insn\n",
				env->insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, env->insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (env->log.level & BPF_LOG_LEVEL) {
				if (do_print_state)
					verbose(env, "\nfrom %d to %d%s: safe\n",
						env->prev_insn_idx, env->insn_idx,
						env->cur_state->speculative ?
						" (speculative execution)" : "");
				else
					verbose(env, "%d: safe\n", env->insn_idx);
			}
			goto process_bpf_exit;
		}

		if (signal_pending(current))
			return -EAGAIN;

		if (need_resched())
			cond_resched();

		if (env->log.level & BPF_LOG_LEVEL2 ||
		    (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
			if (env->log.level & BPF_LOG_LEVEL2)
				verbose(env, "%d:", env->insn_idx);
			else
				verbose(env, "\nfrom %d to %d%s:",
					env->prev_insn_idx, env->insn_idx,
					env->cur_state->speculative ?
					" (speculative execution)" : "");
			print_verifier_state(env, state->frame[state->curframe]);
			do_print_state = false;
		}

		if (env->log.level & BPF_LOG_LEVEL) {
			const struct bpf_insn_cbs cbs = {
				.cb_call	= disasm_kfunc_name,
				.cb_print	= verbose,
				.private_data	= env,
			};

			verbose_linfo(env, env->insn_idx, "; ");
			verbose(env, "%d: ", env->insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
		}

		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
							   env->prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		sanitize_mark_insn_seen(env);
		prev_insn_idx = env->insn_idx;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type *prev_src_type, src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, env->insn_idx, insn->src_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_READ, insn->dst_reg, false);
			if (err)
				return err;

			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;

			if (*prev_src_type == NOT_INIT) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * save type to validate intersecting paths
				 */
				*prev_src_type = src_reg_type;

			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
				 */
				verbose(env, "same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_STX) {
			enum bpf_reg_type *prev_dst_type, dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
				err = check_atomic(env, env->insn_idx, insn);
				if (err)
					return err;
				env->insn_idx++;
				continue;
			}

			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
				verbose(env, "BPF_STX uses reserved fields\n");
				return -EINVAL;
			}

			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, insn->src_reg, false);
			if (err)
				return err;

			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;

			if (*prev_dst_type == NOT_INIT) {
				*prev_dst_type = dst_reg_type;
			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
				verbose(env, "same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose(env, "BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			if (is_ctx_reg(env, insn->dst_reg)) {
				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
					insn->dst_reg,
					reg_type_str[reg_state(env, insn->dst_reg)->type]);
				return -EACCES;
			}

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, -1, false);
			if (err)
				return err;

		} else if (class == BPF_JMP || class == BPF_JMP32) {
			u8 opcode = BPF_OP(insn->code);

			env->jmps_processed++;
			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    (insn->src_reg != BPF_REG_0 &&
				     insn->src_reg != BPF_PSEUDO_CALL &&
				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				if (env->cur_state->active_spin_lock &&
				    (insn->src_reg == BPF_PSEUDO_CALL ||
				     insn->imm != BPF_FUNC_spin_unlock)) {
					verbose(env, "function calls are not allowed while holding a lock\n");
					return -EINVAL;
				}
				if (insn->src_reg == BPF_PSEUDO_CALL)
					err = check_func_call(env, insn, &env->insn_idx);
				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
					err = check_kfunc_call(env, insn);
				else
					err = check_helper_call(env, insn, &env->insn_idx);
				if (err)
					return err;
			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				env->insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				if (env->cur_state->active_spin_lock) {
					verbose(env, "bpf_spin_unlock is missing\n");
					return -EINVAL;
				}

				if (state->curframe) {
					/* exit from nested function */
					err = prepare_func_exit(env, &env->insn_idx);
					if (err)
						return err;
					do_print_state = true;
					continue;
				}

				err = check_reference_leak(env);
				if (err)
					return err;

				err = check_return_code(env);
				if (err)
					return err;
process_bpf_exit:
				update_branch_counts(env, env->cur_state);
				err = pop_stack(env, &prev_insn_idx,
						&env->insn_idx, pop_log);
				if (err < 0) {
					if (err != -ENOENT)
						return err;
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &env->insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				env->insn_idx++;
				sanitize_mark_insn_seen(env);
			} else {
				verbose(env, "invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose(env, "unknown insn class %d\n", class);
			return -EINVAL;
		}

		env->insn_idx++;
	}

	return 0;
}

static int find_btf_percpu_datasec(struct btf *btf)
{
	const struct btf_type *t;
	const char *tname;
	int i, n;

	/*
	 * Both vmlinux and module each have their own ".data..percpu"
	 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF
	 * types to look at only module's own BTF types.
	 */
	n = btf_nr_types(btf);
	if (btf_is_module(btf))
		i = btf_nr_types(btf_vmlinux);
	else
		i = 1;

	for (; i < n; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, ".data..percpu"))
			return i;
	}

	return -ENOENT;
}
*/
11029 ret = btf_resolve_size(btf, t, &tsize);
11030 if (IS_ERR(ret)) {
11031 tname = btf_name_by_offset(btf, t->name_off);
11032 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
11033 tname, PTR_ERR(ret));
11034 err = -EINVAL;
11035 goto err_put;
11036 }
11037 aux->btf_var.reg_type = PTR_TO_MEM;
11038 aux->btf_var.mem_size = tsize;
11039 } else {
11040 aux->btf_var.reg_type = PTR_TO_BTF_ID;
11041 aux->btf_var.btf = btf;
11042 aux->btf_var.btf_id = type;
11043 }
11044
11045 /* check whether we recorded this BTF (and maybe module) already */
11046 for (i = 0; i < env->used_btf_cnt; i++) {
11047 if (env->used_btfs[i].btf == btf) {
11048 btf_put(btf);
11049 return 0;
11050 }
11051 }
11052
11053 if (env->used_btf_cnt >= MAX_USED_BTFS) {
11054 err = -E2BIG;
11055 goto err_put;
11056 }
11057
11058 btf_mod = &env->used_btfs[env->used_btf_cnt];
11059 btf_mod->btf = btf;
11060 btf_mod->module = NULL;
11061
11062 /* if we reference variables from a kernel module, bump its refcount */
11063 if (btf_is_module(btf)) {
11064 btf_mod->module = btf_try_get_module(btf);
11065 if (!btf_mod->module) {
11066 err = -ENXIO;
11067 goto err_put;
11068 }
11069 }
11070
11071 env->used_btf_cnt++;
11072
11073 return 0;
11074 err_put:
11075 btf_put(btf);
11076 return err;
11077 }
11078
11079 static int check_map_prealloc(struct bpf_map *map)
11080 {
11081 return (map->map_type != BPF_MAP_TYPE_HASH &&
11082 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
11083 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
11084 !(map->map_flags & BPF_F_NO_PREALLOC);
11085 }
11086
11087 static bool is_tracing_prog_type(enum bpf_prog_type type)
11088 {
11089 switch (type) {
11090 case BPF_PROG_TYPE_KPROBE:
11091 case BPF_PROG_TYPE_TRACEPOINT:
11092 case BPF_PROG_TYPE_PERF_EVENT:
11093 case BPF_PROG_TYPE_RAW_TRACEPOINT:
11094 return true;
11095 default:
11096 return false;
11097 }
11098 }
11099
11100 static bool is_preallocated_map(struct bpf_map *map)
11101 {
11102 if (!check_map_prealloc(map))
11103 return false;
11104 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
11105 return false;
11106 return true;
11107 }
11108
11109 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
11110 struct bpf_map *map,
11111 struct bpf_prog *prog)
11112
11113 {
11114 enum bpf_prog_type prog_type = resolve_prog_type(prog);
11115 /*
11116 * Validate that trace type programs use preallocated hash maps.
11117 *
11118 * For programs attached to PERF events this is mandatory as the
11119 * perf NMI can hit any arbitrary code sequence.
11120 *
11121 * All other trace types using non-preallocated hash maps are unsafe
11122 * as well because tracepoint or kprobes can be inside locked regions
11123 * of the memory allocator or at a place where a recursion into the
11124 * memory allocator would see inconsistent state.
11125 *
11126 * On RT enabled kernels run-time allocation in all trace type
11127 * programs is strictly prohibited due to lock type constraints. On
11128 * !RT kernels it is allowed for backwards compatibility reasons for
11129 * now, but warnings are emitted so developers are made aware of
11130 * the unsafety and can fix their programs before this is enforced.
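*
* A user-space sketch (hypothetical libbpf-style map definition): a
* kprobe-safe hash map simply avoids BPF_F_NO_PREALLOC, e.g.
*
*   struct {
*           __uint(type, BPF_MAP_TYPE_HASH);
*           __uint(max_entries, 1024);
*           __type(key, u32);
*           __type(value, u64);
*   } counts SEC(".maps");
*
* since hash maps preallocate their elements unless BPF_F_NO_PREALLOC
* is explicitly requested.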
11131 */ 11132 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) { 11133 if (prog_type == BPF_PROG_TYPE_PERF_EVENT) { 11134 verbose(env, "perf_event programs can only use preallocated hash map\n"); 11135 return -EINVAL; 11136 } 11137 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 11138 verbose(env, "trace type programs can only use preallocated hash map\n"); 11139 return -EINVAL; 11140 } 11141 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); 11142 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); 11143 } 11144 11145 if (map_value_has_spin_lock(map)) { 11146 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 11147 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 11148 return -EINVAL; 11149 } 11150 11151 if (is_tracing_prog_type(prog_type)) { 11152 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 11153 return -EINVAL; 11154 } 11155 11156 if (prog->aux->sleepable) { 11157 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n"); 11158 return -EINVAL; 11159 } 11160 } 11161 11162 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 11163 !bpf_offload_prog_map_match(prog, map)) { 11164 verbose(env, "offload device mismatch between prog and map\n"); 11165 return -EINVAL; 11166 } 11167 11168 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 11169 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 11170 return -EINVAL; 11171 } 11172 11173 if (prog->aux->sleepable) 11174 switch (map->map_type) { 11175 case BPF_MAP_TYPE_HASH: 11176 case BPF_MAP_TYPE_LRU_HASH: 11177 case BPF_MAP_TYPE_ARRAY: 11178 case BPF_MAP_TYPE_PERCPU_HASH: 11179 case BPF_MAP_TYPE_PERCPU_ARRAY: 11180 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 11181 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 11182 case BPF_MAP_TYPE_HASH_OF_MAPS: 11183 if (!is_preallocated_map(map)) { 11184 verbose(env, 11185 "Sleepable programs can only use preallocated maps\n"); 11186 return -EINVAL; 11187 } 11188 break; 11189 case BPF_MAP_TYPE_RINGBUF: 11190 break; 11191 default: 11192 verbose(env, 11193 "Sleepable programs can only use array, hash, and ringbuf maps\n"); 11194 return -EINVAL; 11195 } 11196 11197 return 0; 11198 } 11199 11200 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 11201 { 11202 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 11203 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 11204 } 11205 11206 /* find and rewrite pseudo imm in ld_imm64 instructions: 11207 * 11208 * 1. if it accesses map FD, replace it with actual map pointer. 11209 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 11210 * 11211 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 
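*
* A ksym load as emitted by e.g. libbpf is a two-insn ld_imm64 pair
* (a sketch of the encoding that check_pseudo_btf_id() handles):
*   insn[0]: BPF_LD | BPF_IMM | BPF_DW, src_reg = BPF_PSEUDO_BTF_ID,
*            imm = btf_id of the KIND_VAR
*   insn[1]: imm = module BTF object FD (0 for vmlinux BTF)
* After resolution, insn[0].imm/insn[1].imm carry the low/high 32 bits
* of the kernel symbol's address.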
11212 */ 11213 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 11214 { 11215 struct bpf_insn *insn = env->prog->insnsi; 11216 int insn_cnt = env->prog->len; 11217 int i, j, err; 11218 11219 err = bpf_prog_calc_tag(env->prog); 11220 if (err) 11221 return err; 11222 11223 for (i = 0; i < insn_cnt; i++, insn++) { 11224 if (BPF_CLASS(insn->code) == BPF_LDX && 11225 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 11226 verbose(env, "BPF_LDX uses reserved fields\n"); 11227 return -EINVAL; 11228 } 11229 11230 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 11231 struct bpf_insn_aux_data *aux; 11232 struct bpf_map *map; 11233 struct fd f; 11234 u64 addr; 11235 u32 fd; 11236 11237 if (i == insn_cnt - 1 || insn[1].code != 0 || 11238 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 11239 insn[1].off != 0) { 11240 verbose(env, "invalid bpf_ld_imm64 insn\n"); 11241 return -EINVAL; 11242 } 11243 11244 if (insn[0].src_reg == 0) 11245 /* valid generic load 64-bit imm */ 11246 goto next_insn; 11247 11248 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 11249 aux = &env->insn_aux_data[i]; 11250 err = check_pseudo_btf_id(env, insn, aux); 11251 if (err) 11252 return err; 11253 goto next_insn; 11254 } 11255 11256 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { 11257 aux = &env->insn_aux_data[i]; 11258 aux->ptr_type = PTR_TO_FUNC; 11259 goto next_insn; 11260 } 11261 11262 /* In final convert_pseudo_ld_imm64() step, this is 11263 * converted into regular 64-bit imm load insn. 11264 */ 11265 switch (insn[0].src_reg) { 11266 case BPF_PSEUDO_MAP_VALUE: 11267 case BPF_PSEUDO_MAP_IDX_VALUE: 11268 break; 11269 case BPF_PSEUDO_MAP_FD: 11270 case BPF_PSEUDO_MAP_IDX: 11271 if (insn[1].imm == 0) 11272 break; 11273 fallthrough; 11274 default: 11275 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); 11276 return -EINVAL; 11277 } 11278 11279 switch (insn[0].src_reg) { 11280 case BPF_PSEUDO_MAP_IDX_VALUE: 11281 case BPF_PSEUDO_MAP_IDX: 11282 if (bpfptr_is_null(env->fd_array)) { 11283 verbose(env, "fd_idx without fd_array is invalid\n"); 11284 return -EPROTO; 11285 } 11286 if (copy_from_bpfptr_offset(&fd, env->fd_array, 11287 insn[0].imm * sizeof(fd), 11288 sizeof(fd))) 11289 return -EFAULT; 11290 break; 11291 default: 11292 fd = insn[0].imm; 11293 break; 11294 } 11295 11296 f = fdget(fd); 11297 map = __bpf_map_get(f); 11298 if (IS_ERR(map)) { 11299 verbose(env, "fd %d is not pointing to valid bpf_map\n", 11300 insn[0].imm); 11301 return PTR_ERR(map); 11302 } 11303 11304 err = check_map_prog_compatibility(env, map, env->prog); 11305 if (err) { 11306 fdput(f); 11307 return err; 11308 } 11309 11310 aux = &env->insn_aux_data[i]; 11311 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || 11312 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { 11313 addr = (unsigned long)map; 11314 } else { 11315 u32 off = insn[1].imm; 11316 11317 if (off >= BPF_MAX_VAR_OFF) { 11318 verbose(env, "direct value offset of %u is not allowed\n", off); 11319 fdput(f); 11320 return -EINVAL; 11321 } 11322 11323 if (!map->ops->map_direct_value_addr) { 11324 verbose(env, "no direct value access support for this map type\n"); 11325 fdput(f); 11326 return -EINVAL; 11327 } 11328 11329 err = map->ops->map_direct_value_addr(map, &addr, off); 11330 if (err) { 11331 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 11332 map->value_size, off); 11333 fdput(f); 11334 return err; 11335 } 11336 11337 aux->map_off = off; 11338 addr += off; 11339 } 11340 11341 insn[0].imm = (u32)addr; 11342 insn[1].imm = addr >> 32; 11343 11344 /* check whether we 
recorded this map already */
11345 for (j = 0; j < env->used_map_cnt; j++) {
11346 if (env->used_maps[j] == map) {
11347 aux->map_index = j;
11348 fdput(f);
11349 goto next_insn;
11350 }
11351 }
11352
11353 if (env->used_map_cnt >= MAX_USED_MAPS) {
11354 fdput(f);
11355 return -E2BIG;
11356 }
11357
11358 /* hold the map. If the program is rejected by the verifier,
11359 * the map will be released by release_maps() or it
11360 * will be used by the valid program until it's unloaded
11361 * and all maps are released in free_used_maps()
11362 */
11363 bpf_map_inc(map);
11364
11365 aux->map_index = env->used_map_cnt;
11366 env->used_maps[env->used_map_cnt++] = map;
11367
11368 if (bpf_map_is_cgroup_storage(map) &&
11369 bpf_cgroup_storage_assign(env->prog->aux, map)) {
11370 verbose(env, "only one cgroup storage of each type is allowed\n");
11371 fdput(f);
11372 return -EBUSY;
11373 }
11374
11375 fdput(f);
11376 next_insn:
11377 insn++;
11378 i++;
11379 continue;
11380 }
11381
11382 /* Basic sanity check before we invest more work here. */
11383 if (!bpf_opcode_in_insntable(insn->code)) {
11384 verbose(env, "unknown opcode %02x\n", insn->code);
11385 return -EINVAL;
11386 }
11387 }
11388
11389 /* now all pseudo BPF_LD_IMM64 instructions load valid
11390 * 'struct bpf_map *' into a register instead of user map_fd.
11391 * These pointers will be used later by verifier to validate map access.
11392 */
11393 return 0;
11394 }
11395
11396 /* drop refcnt of maps used by the rejected program */
11397 static void release_maps(struct bpf_verifier_env *env)
11398 {
11399 __bpf_free_used_maps(env->prog->aux, env->used_maps,
11400 env->used_map_cnt);
11401 }
11402
11403 /* drop refcnt of BTFs used by the rejected program */
11404 static void release_btfs(struct bpf_verifier_env *env)
11405 {
11406 __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
11407 env->used_btf_cnt);
11408 }
11409
11410 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
11411 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
11412 {
11413 struct bpf_insn *insn = env->prog->insnsi;
11414 int insn_cnt = env->prog->len;
11415 int i;
11416
11417 for (i = 0; i < insn_cnt; i++, insn++) {
11418 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
11419 continue;
11420 if (insn->src_reg == BPF_PSEUDO_FUNC)
11421 continue;
11422 insn->src_reg = 0;
11423 }
11424 }
11425
11426 /* single env->prog->insnsi[off] instruction was replaced with the range
11427 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
11428 * [0, off) and [off, end) to new locations, so the patched range stays zero
11429 */
11430 static int adjust_insn_aux_data(struct bpf_verifier_env *env,
11431 struct bpf_prog *new_prog, u32 off, u32 cnt)
11432 {
11433 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
11434 struct bpf_insn *insn = new_prog->insnsi;
11435 u32 old_seen = old_data[off].seen;
11436 u32 prog_len;
11437 int i;
11438
11439 /* aux info at OFF always needs adjustment, no matter whether the fast
11440 * path (cnt == 1) is taken or not. There is no guarantee INSN at OFF is
11441 * the original insn at old prog.
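*
* For example (a sketch with hypothetical values): patching the single
* insn at off = 3 with a 3-insn sequence (cnt == 3) copies old aux
* slots [0, 3) unchanged, moves old slots [3, end) to [5, end + 2),
* and gives the two freshly created slots [3, 5) the seen count of the
* original insn, recomputing zext_dst for each of them.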
11442 */ 11443 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 11444 11445 if (cnt == 1) 11446 return 0; 11447 prog_len = new_prog->len; 11448 new_data = vzalloc(array_size(prog_len, 11449 sizeof(struct bpf_insn_aux_data))); 11450 if (!new_data) 11451 return -ENOMEM; 11452 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 11453 memcpy(new_data + off + cnt - 1, old_data + off, 11454 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 11455 for (i = off; i < off + cnt - 1; i++) { 11456 /* Expand insni[off]'s seen count to the patched range. */ 11457 new_data[i].seen = old_seen; 11458 new_data[i].zext_dst = insn_has_def32(env, insn + i); 11459 } 11460 env->insn_aux_data = new_data; 11461 vfree(old_data); 11462 return 0; 11463 } 11464 11465 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 11466 { 11467 int i; 11468 11469 if (len == 1) 11470 return; 11471 /* NOTE: fake 'exit' subprog should be updated as well. */ 11472 for (i = 0; i <= env->subprog_cnt; i++) { 11473 if (env->subprog_info[i].start <= off) 11474 continue; 11475 env->subprog_info[i].start += len - 1; 11476 } 11477 } 11478 11479 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 11480 { 11481 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 11482 int i, sz = prog->aux->size_poke_tab; 11483 struct bpf_jit_poke_descriptor *desc; 11484 11485 for (i = 0; i < sz; i++) { 11486 desc = &tab[i]; 11487 if (desc->insn_idx <= off) 11488 continue; 11489 desc->insn_idx += len - 1; 11490 } 11491 } 11492 11493 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 11494 const struct bpf_insn *patch, u32 len) 11495 { 11496 struct bpf_prog *new_prog; 11497 11498 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 11499 if (IS_ERR(new_prog)) { 11500 if (PTR_ERR(new_prog) == -ERANGE) 11501 verbose(env, 11502 "insn %d cannot be patched due to 16-bit range\n", 11503 env->insn_aux_data[off].orig_idx); 11504 return NULL; 11505 } 11506 if (adjust_insn_aux_data(env, new_prog, off, len)) 11507 return NULL; 11508 adjust_subprog_starts(env, off, len); 11509 adjust_poke_descs(new_prog, off, len); 11510 return new_prog; 11511 } 11512 11513 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 11514 u32 off, u32 cnt) 11515 { 11516 int i, j; 11517 11518 /* find first prog starting at or after off (first to remove) */ 11519 for (i = 0; i < env->subprog_cnt; i++) 11520 if (env->subprog_info[i].start >= off) 11521 break; 11522 /* find first prog starting at or after off + cnt (first to stay) */ 11523 for (j = i; j < env->subprog_cnt; j++) 11524 if (env->subprog_info[j].start >= off + cnt) 11525 break; 11526 /* if j doesn't start exactly at off + cnt, we are just removing 11527 * the front of previous prog 11528 */ 11529 if (env->subprog_info[j].start != off + cnt) 11530 j--; 11531 11532 if (j > i) { 11533 struct bpf_prog_aux *aux = env->prog->aux; 11534 int move; 11535 11536 /* move fake 'exit' subprog as well */ 11537 move = env->subprog_cnt + 1 - j; 11538 11539 memmove(env->subprog_info + i, 11540 env->subprog_info + j, 11541 sizeof(*env->subprog_info) * move); 11542 env->subprog_cnt -= j - i; 11543 11544 /* remove func_info */ 11545 if (aux->func_info) { 11546 move = aux->func_info_cnt - j; 11547 11548 memmove(aux->func_info + i, 11549 aux->func_info + j, 11550 sizeof(*aux->func_info) * move); 11551 aux->func_info_cnt -= j - i; 11552 /* func_info->insn_off is set after all code rewrites, 11553 
* in adjust_btf_func() - no need to adjust 11554 */ 11555 } 11556 } else { 11557 /* convert i from "first prog to remove" to "first to adjust" */ 11558 if (env->subprog_info[i].start == off) 11559 i++; 11560 } 11561 11562 /* update fake 'exit' subprog as well */ 11563 for (; i <= env->subprog_cnt; i++) 11564 env->subprog_info[i].start -= cnt; 11565 11566 return 0; 11567 } 11568 11569 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 11570 u32 cnt) 11571 { 11572 struct bpf_prog *prog = env->prog; 11573 u32 i, l_off, l_cnt, nr_linfo; 11574 struct bpf_line_info *linfo; 11575 11576 nr_linfo = prog->aux->nr_linfo; 11577 if (!nr_linfo) 11578 return 0; 11579 11580 linfo = prog->aux->linfo; 11581 11582 /* find first line info to remove, count lines to be removed */ 11583 for (i = 0; i < nr_linfo; i++) 11584 if (linfo[i].insn_off >= off) 11585 break; 11586 11587 l_off = i; 11588 l_cnt = 0; 11589 for (; i < nr_linfo; i++) 11590 if (linfo[i].insn_off < off + cnt) 11591 l_cnt++; 11592 else 11593 break; 11594 11595 /* First live insn doesn't match first live linfo, it needs to "inherit" 11596 * last removed linfo. prog is already modified, so prog->len == off 11597 * means no live instructions after (tail of the program was removed). 11598 */ 11599 if (prog->len != off && l_cnt && 11600 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 11601 l_cnt--; 11602 linfo[--i].insn_off = off + cnt; 11603 } 11604 11605 /* remove the line info which refer to the removed instructions */ 11606 if (l_cnt) { 11607 memmove(linfo + l_off, linfo + i, 11608 sizeof(*linfo) * (nr_linfo - i)); 11609 11610 prog->aux->nr_linfo -= l_cnt; 11611 nr_linfo = prog->aux->nr_linfo; 11612 } 11613 11614 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 11615 for (i = l_off; i < nr_linfo; i++) 11616 linfo[i].insn_off -= cnt; 11617 11618 /* fix up all subprogs (incl. 'exit') which start >= off */ 11619 for (i = 0; i <= env->subprog_cnt; i++) 11620 if (env->subprog_info[i].linfo_idx > l_off) { 11621 /* program may have started in the removed region but 11622 * may not be fully removed 11623 */ 11624 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 11625 env->subprog_info[i].linfo_idx -= l_cnt; 11626 else 11627 env->subprog_info[i].linfo_idx = l_off; 11628 } 11629 11630 return 0; 11631 } 11632 11633 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 11634 { 11635 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 11636 unsigned int orig_prog_len = env->prog->len; 11637 int err; 11638 11639 if (bpf_prog_is_dev_bound(env->prog->aux)) 11640 bpf_prog_offload_remove_insns(env, off, cnt); 11641 11642 err = bpf_remove_insns(env->prog, off, cnt); 11643 if (err) 11644 return err; 11645 11646 err = adjust_subprog_starts_after_remove(env, off, cnt); 11647 if (err) 11648 return err; 11649 11650 err = bpf_adj_linfo_after_remove(env, off, cnt); 11651 if (err) 11652 return err; 11653 11654 memmove(aux_data + off, aux_data + off + cnt, 11655 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 11656 11657 return 0; 11658 } 11659 11660 /* The verifier does more data flow analysis than llvm and will not 11661 * explore branches that are dead at run time. Malicious programs can 11662 * have dead code too. Therefore replace all dead at-run-time code 11663 * with 'ja -1'. 11664 * 11665 * Just nops are not optimal, e.g. if they would sit at the end of the 11666 * program and through another bug we would manage to jump there, then 11667 * we'd execute beyond program memory otherwise. 
Returning exception 11668 * code also wouldn't work since we can have subprogs where the dead 11669 * code could be located. 11670 */ 11671 static void sanitize_dead_code(struct bpf_verifier_env *env) 11672 { 11673 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 11674 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 11675 struct bpf_insn *insn = env->prog->insnsi; 11676 const int insn_cnt = env->prog->len; 11677 int i; 11678 11679 for (i = 0; i < insn_cnt; i++) { 11680 if (aux_data[i].seen) 11681 continue; 11682 memcpy(insn + i, &trap, sizeof(trap)); 11683 } 11684 } 11685 11686 static bool insn_is_cond_jump(u8 code) 11687 { 11688 u8 op; 11689 11690 if (BPF_CLASS(code) == BPF_JMP32) 11691 return true; 11692 11693 if (BPF_CLASS(code) != BPF_JMP) 11694 return false; 11695 11696 op = BPF_OP(code); 11697 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 11698 } 11699 11700 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 11701 { 11702 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 11703 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 11704 struct bpf_insn *insn = env->prog->insnsi; 11705 const int insn_cnt = env->prog->len; 11706 int i; 11707 11708 for (i = 0; i < insn_cnt; i++, insn++) { 11709 if (!insn_is_cond_jump(insn->code)) 11710 continue; 11711 11712 if (!aux_data[i + 1].seen) 11713 ja.off = insn->off; 11714 else if (!aux_data[i + 1 + insn->off].seen) 11715 ja.off = 0; 11716 else 11717 continue; 11718 11719 if (bpf_prog_is_dev_bound(env->prog->aux)) 11720 bpf_prog_offload_replace_insn(env, i, &ja); 11721 11722 memcpy(insn, &ja, sizeof(ja)); 11723 } 11724 } 11725 11726 static int opt_remove_dead_code(struct bpf_verifier_env *env) 11727 { 11728 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 11729 int insn_cnt = env->prog->len; 11730 int i, err; 11731 11732 for (i = 0; i < insn_cnt; i++) { 11733 int j; 11734 11735 j = 0; 11736 while (i + j < insn_cnt && !aux_data[i + j].seen) 11737 j++; 11738 if (!j) 11739 continue; 11740 11741 err = verifier_remove_insns(env, i, j); 11742 if (err) 11743 return err; 11744 insn_cnt = env->prog->len; 11745 } 11746 11747 return 0; 11748 } 11749 11750 static int opt_remove_nops(struct bpf_verifier_env *env) 11751 { 11752 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 11753 struct bpf_insn *insn = env->prog->insnsi; 11754 int insn_cnt = env->prog->len; 11755 int i, err; 11756 11757 for (i = 0; i < insn_cnt; i++) { 11758 if (memcmp(&insn[i], &ja, sizeof(ja))) 11759 continue; 11760 11761 err = verifier_remove_insns(env, i, 1); 11762 if (err) 11763 return err; 11764 insn_cnt--; 11765 i--; 11766 } 11767 11768 return 0; 11769 } 11770 11771 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 11772 const union bpf_attr *attr) 11773 { 11774 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 11775 struct bpf_insn_aux_data *aux = env->insn_aux_data; 11776 int i, patch_len, delta = 0, len = env->prog->len; 11777 struct bpf_insn *insns = env->prog->insnsi; 11778 struct bpf_prog *new_prog; 11779 bool rnd_hi32; 11780 11781 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 11782 zext_patch[1] = BPF_ZEXT_REG(0); 11783 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 11784 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 11785 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 11786 for (i = 0; i < len; i++) { 11787 int adj_idx = i + delta; 11788 struct bpf_insn insn; 11789 int load_reg; 11790 11791 insn = insns[adj_idx]; 11792 load_reg = 
insn_def_regno(&insn);
11793 if (!aux[adj_idx].zext_dst) {
11794 u8 code, class;
11795 u32 imm_rnd;
11796
11797 if (!rnd_hi32)
11798 continue;
11799
11800 code = insn.code;
11801 class = BPF_CLASS(code);
11802 if (load_reg == -1)
11803 continue;
11804
11805 /* NOTE: arg "reg" (the fourth one) is only used for
11806 * BPF_STX + SRC_OP, so it is safe to pass NULL
11807 * here.
11808 */
11809 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
11810 if (class == BPF_LD &&
11811 BPF_MODE(code) == BPF_IMM)
11812 i++;
11813 continue;
11814 }
11815
11816 /* ctx load could be transformed into a wider load. */
11817 if (class == BPF_LDX &&
11818 aux[adj_idx].ptr_type == PTR_TO_CTX)
11819 continue;
11820
11821 imm_rnd = get_random_int();
11822 rnd_hi32_patch[0] = insn;
11823 rnd_hi32_patch[1].imm = imm_rnd;
11824 rnd_hi32_patch[3].dst_reg = load_reg;
11825 patch = rnd_hi32_patch;
11826 patch_len = 4;
11827 goto apply_patch_buffer;
11828 }
11829
11830 /* Add in a zero-extend instruction if a) the JIT has requested
11831 * it or b) it's a CMPXCHG.
11832 *
11833 * The latter is because BPF_CMPXCHG always loads a value into
11834 * R0, therefore always zero-extends. However some archs'
11835 * equivalent instruction only does this load when the
11836 * comparison is successful. This detail of CMPXCHG is
11837 * orthogonal to the general zero-extension behaviour of the
11838 * CPU, so it's treated independently of bpf_jit_needs_zext.
11839 */
11840 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
11841 continue;
11842
11843 if (WARN_ON(load_reg == -1)) {
11844 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
11845 return -EFAULT;
11846 }
11847
11848 zext_patch[0] = insn;
11849 zext_patch[1].dst_reg = load_reg;
11850 zext_patch[1].src_reg = load_reg;
11851 patch = zext_patch;
11852 patch_len = 2;
11853 apply_patch_buffer:
11854 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
11855 if (!new_prog)
11856 return -ENOMEM;
11857 env->prog = new_prog;
11858 insns = new_prog->insnsi;
11859 aux = env->insn_aux_data;
11860 delta += patch_len - 1;
11861 }
11862
11863 return 0;
11864 }
11865
11866 /* convert load instructions that access fields of a context type into a
11867 * sequence of instructions that access fields of the underlying structure:
11868 * struct __sk_buff -> struct sk_buff
11869 * struct bpf_sock_ops -> struct sock
11870 */
11871 static int convert_ctx_accesses(struct bpf_verifier_env *env)
11872 {
11873 const struct bpf_verifier_ops *ops = env->ops;
11874 int i, cnt, size, ctx_field_size, delta = 0;
11875 const int insn_cnt = env->prog->len;
11876 struct bpf_insn insn_buf[16], *insn;
11877 u32 target_size, size_default, off;
11878 struct bpf_prog *new_prog;
11879 enum bpf_access_type type;
11880 bool is_narrower_load;
11881
11882 if (ops->gen_prologue || env->seen_direct_write) {
11883 if (!ops->gen_prologue) {
11884 verbose(env, "bpf verifier is misconfigured\n");
11885 return -EINVAL;
11886 }
11887 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
11888 env->prog);
11889 if (cnt >= ARRAY_SIZE(insn_buf)) {
11890 verbose(env, "bpf verifier is misconfigured\n");
11891 return -EINVAL;
11892 } else if (cnt) {
11893 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
11894 if (!new_prog)
11895 return -ENOMEM;
11896
11897 env->prog = new_prog;
11898 delta += cnt - 1;
11899 }
11900 }
11901
11902 if (bpf_prog_is_dev_bound(env->prog->aux))
11903 return 0;
11904
11905 insn = env->prog->insnsi + delta;
11906
11907 for (i = 0; i < insn_cnt; i++, insn++) {
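/* A sketch of the rewrite performed below, for a hypothetical
 * __sk_buff field (the exact sequence comes from the program
 * type's convert_ctx_access callback):
 *
 *   r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, mark))
 *
 * becomes a load from the real underlying object, e.g.
 *
 *   r0 = *(u32 *)(r1 + offsetof(struct sk_buff, mark))
 */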
11908 bpf_convert_ctx_access_t convert_ctx_access;
11909
11910 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
11911 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
11912 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
11913 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
11914 type = BPF_READ;
11915 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
11916 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
11917 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
11918 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
11919 type = BPF_WRITE;
11920 else
11921 continue;
11922
11923 if (type == BPF_WRITE &&
11924 env->insn_aux_data[i + delta].sanitize_stack_off) {
11925 struct bpf_insn patch[] = {
11926 /* Sanitize suspicious stack slot with zero.
11927 * There are no memory dependencies for this store,
11928 * since it's only using frame pointer and immediate
11929 * constant of zero
11930 */
11931 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
11932 env->insn_aux_data[i + delta].sanitize_stack_off,
11933 0),
11934 /* the original STX instruction will immediately
11935 * overwrite the same stack slot with appropriate value
11936 */
11937 *insn,
11938 };
11939
11940 cnt = ARRAY_SIZE(patch);
11941 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
11942 if (!new_prog)
11943 return -ENOMEM;
11944
11945 delta += cnt - 1;
11946 env->prog = new_prog;
11947 insn = new_prog->insnsi + i + delta;
11948 continue;
11949 }
11950
11951 switch (env->insn_aux_data[i + delta].ptr_type) {
11952 case PTR_TO_CTX:
11953 if (!ops->convert_ctx_access)
11954 continue;
11955 convert_ctx_access = ops->convert_ctx_access;
11956 break;
11957 case PTR_TO_SOCKET:
11958 case PTR_TO_SOCK_COMMON:
11959 convert_ctx_access = bpf_sock_convert_ctx_access;
11960 break;
11961 case PTR_TO_TCP_SOCK:
11962 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
11963 break;
11964 case PTR_TO_XDP_SOCK:
11965 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
11966 break;
11967 case PTR_TO_BTF_ID:
11968 if (type == BPF_READ) {
11969 insn->code = BPF_LDX | BPF_PROBE_MEM |
11970 BPF_SIZE((insn)->code);
11971 env->prog->aux->num_exentries++;
11972 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
11973 verbose(env, "Writes through BTF pointers are not allowed\n");
11974 return -EINVAL;
11975 }
11976 continue;
11977 default:
11978 continue;
11979 }
11980
11981 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
11982 size = BPF_LDST_BYTES(insn);
11983
11984 /* If the read access is a narrower load of the field,
11985 * convert it to a 4/8-byte load, to minimize program type
11986 * specific convert_ctx_access changes. If conversion is
11987 * successful, we will apply the proper mask to the result.
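*
* For example (a sketch, little endian, hypothetical field): a 1-byte
* load at byte offset 1 of a 4-byte context field is widened below to
* a 4-byte load at offset 0, and afterwards the result is fixed up
* with roughly:
*
*   BPF_ALU32_IMM(BPF_RSH, dst_reg, 8)
*   BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff)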
11988 */ 11989 is_narrower_load = size < ctx_field_size; 11990 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 11991 off = insn->off; 11992 if (is_narrower_load) { 11993 u8 size_code; 11994 11995 if (type == BPF_WRITE) { 11996 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 11997 return -EINVAL; 11998 } 11999 12000 size_code = BPF_H; 12001 if (ctx_field_size == 4) 12002 size_code = BPF_W; 12003 else if (ctx_field_size == 8) 12004 size_code = BPF_DW; 12005 12006 insn->off = off & ~(size_default - 1); 12007 insn->code = BPF_LDX | BPF_MEM | size_code; 12008 } 12009 12010 target_size = 0; 12011 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 12012 &target_size); 12013 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 12014 (ctx_field_size && !target_size)) { 12015 verbose(env, "bpf verifier is misconfigured\n"); 12016 return -EINVAL; 12017 } 12018 12019 if (is_narrower_load && size < target_size) { 12020 u8 shift = bpf_ctx_narrow_access_offset( 12021 off, size, size_default) * 8; 12022 if (ctx_field_size <= 4) { 12023 if (shift) 12024 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 12025 insn->dst_reg, 12026 shift); 12027 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 12028 (1 << size * 8) - 1); 12029 } else { 12030 if (shift) 12031 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 12032 insn->dst_reg, 12033 shift); 12034 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 12035 (1ULL << size * 8) - 1); 12036 } 12037 } 12038 12039 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 12040 if (!new_prog) 12041 return -ENOMEM; 12042 12043 delta += cnt - 1; 12044 12045 /* keep walking new program and skip insns we just inserted */ 12046 env->prog = new_prog; 12047 insn = new_prog->insnsi + i + delta; 12048 } 12049 12050 return 0; 12051 } 12052 12053 static int jit_subprogs(struct bpf_verifier_env *env) 12054 { 12055 struct bpf_prog *prog = env->prog, **func, *tmp; 12056 int i, j, subprog_start, subprog_end = 0, len, subprog; 12057 struct bpf_map *map_ptr; 12058 struct bpf_insn *insn; 12059 void *old_bpf_func; 12060 int err, num_exentries; 12061 12062 if (env->subprog_cnt <= 1) 12063 return 0; 12064 12065 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 12066 if (bpf_pseudo_func(insn)) { 12067 env->insn_aux_data[i].call_imm = insn->imm; 12068 /* subprog is encoded in insn[1].imm */ 12069 continue; 12070 } 12071 12072 if (!bpf_pseudo_call(insn)) 12073 continue; 12074 /* Upon error here we cannot fall back to interpreter but 12075 * need a hard reject of the program. Thus -EFAULT is 12076 * propagated in any case. 12077 */ 12078 subprog = find_subprog(env, i + insn->imm + 1); 12079 if (subprog < 0) { 12080 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n",
12081 i + insn->imm + 1);
12082 return -EFAULT;
12083 }
12084 /* temporarily remember subprog id inside insn instead of
12085 * aux_data, since next loop will split up all insns into funcs
12086 */
12087 insn->off = subprog;
12088 /* remember original imm in case JIT fails and a fallback
12089 * to the interpreter will be needed
12090 */
12091 env->insn_aux_data[i].call_imm = insn->imm;
12092 /* point imm to __bpf_call_base+1 from JIT's point of view */
12093 insn->imm = 1;
12094 }
12095
12096 err = bpf_prog_alloc_jited_linfo(prog);
12097 if (err)
12098 goto out_undo_insn;
12099
12100 err = -ENOMEM;
12101 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
12102 if (!func)
12103 goto out_undo_insn;
12104
12105 for (i = 0; i < env->subprog_cnt; i++) {
12106 subprog_start = subprog_end;
12107 subprog_end = env->subprog_info[i + 1].start;
12108
12109 len = subprog_end - subprog_start;
12110 /* BPF_PROG_RUN doesn't call subprogs directly,
12111 * hence main prog stats include the runtime of subprogs.
12112 * subprogs don't have IDs and are not reachable via prog_get_next_id
12113 * func[i]->stats will never be accessed and stays NULL
12114 */
12115 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
12116 if (!func[i])
12117 goto out_free;
12118 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
12119 len * sizeof(struct bpf_insn));
12120 func[i]->type = prog->type;
12121 func[i]->len = len;
12122 if (bpf_prog_calc_tag(func[i]))
12123 goto out_free;
12124 func[i]->is_func = 1;
12125 func[i]->aux->func_idx = i;
12126 /* Below members will be freed only at prog->aux */
12127 func[i]->aux->btf = prog->aux->btf;
12128 func[i]->aux->func_info = prog->aux->func_info;
12129 func[i]->aux->poke_tab = prog->aux->poke_tab;
12130 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
12131
12132 for (j = 0; j < prog->aux->size_poke_tab; j++) {
12133 struct bpf_jit_poke_descriptor *poke;
12134
12135 poke = &prog->aux->poke_tab[j];
12136 if (poke->insn_idx < subprog_end &&
12137 poke->insn_idx >= subprog_start)
12138 poke->aux = func[i]->aux;
12139 }
12140
12141 /* Use bpf_prog_F_tag to indicate functions in stack traces.
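* (a JITed subprog then appears in kallsyms as a hypothetical
* "bpf_prog_<tag>_F" symbol, where <tag> is the hash computed by
* bpf_prog_calc_tag() above)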
12142 * Long term would need debug info to populate names
12143 */
12144 func[i]->aux->name[0] = 'F';
12145 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
12146 func[i]->jit_requested = 1;
12147 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
12148 func[i]->aux->linfo = prog->aux->linfo;
12149 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
12150 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
12151 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
12152 num_exentries = 0;
12153 insn = func[i]->insnsi;
12154 for (j = 0; j < func[i]->len; j++, insn++) {
12155 if (BPF_CLASS(insn->code) == BPF_LDX &&
12156 BPF_MODE(insn->code) == BPF_PROBE_MEM)
12157 num_exentries++;
12158 }
12159 func[i]->aux->num_exentries = num_exentries;
12160 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
12161 func[i] = bpf_int_jit_compile(func[i]);
12162 if (!func[i]->jited) {
12163 err = -ENOTSUPP;
12164 goto out_free;
12165 }
12166 cond_resched();
12167 }
12168
12169 /* at this point all bpf functions were successfully JITed
12170 * now populate all bpf_calls with correct addresses and
12171 * run last pass of JIT
12172 */
12173 for (i = 0; i < env->subprog_cnt; i++) {
12174 insn = func[i]->insnsi;
12175 for (j = 0; j < func[i]->len; j++, insn++) {
12176 if (bpf_pseudo_func(insn)) {
12177 subprog = insn[1].imm;
12178 insn[0].imm = (u32)(long)func[subprog]->bpf_func;
12179 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
12180 continue;
12181 }
12182 if (!bpf_pseudo_call(insn))
12183 continue;
12184 subprog = insn->off;
12185 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
12186 __bpf_call_base;
12187 }
12188
12189 /* we use the aux data to keep a list of the start addresses
12190 * of the JITed images for each function in the program
12191 *
12192 * for some architectures, such as powerpc64, the imm field
12193 * might not be large enough to hold the offset of the start
12194 * address of the callee's JITed image from __bpf_call_base
12195 *
12196 * in such cases, we can look up the start address of a callee
12197 * by using its subprog id, available from the off field of
12198 * the call instruction, as an index for this list
12199 */
12200 func[i]->aux->func = func;
12201 func[i]->aux->func_cnt = env->subprog_cnt;
12202 }
12203 for (i = 0; i < env->subprog_cnt; i++) {
12204 old_bpf_func = func[i]->bpf_func;
12205 tmp = bpf_int_jit_compile(func[i]);
12206 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
12207 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
12208 err = -ENOTSUPP;
12209 goto out_free;
12210 }
12211 cond_resched();
12212 }
12213
12214 /* finally lock prog and jit images for all functions and
12215 * populate kallsyms
12216 */
12217 for (i = 0; i < env->subprog_cnt; i++) {
12218 bpf_prog_lock_ro(func[i]);
12219 bpf_prog_kallsyms_add(func[i]);
12220 }
12221
12222 /* Last step: make now unused interpreter insns from main
12223 * prog consistent for later dump requests, so they can
12224 * later look the same as if they were interpreted only.
12225 */ 12226 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 12227 if (bpf_pseudo_func(insn)) { 12228 insn[0].imm = env->insn_aux_data[i].call_imm; 12229 insn[1].imm = find_subprog(env, i + insn[0].imm + 1); 12230 continue; 12231 } 12232 if (!bpf_pseudo_call(insn)) 12233 continue; 12234 insn->off = env->insn_aux_data[i].call_imm; 12235 subprog = find_subprog(env, i + insn->off + 1); 12236 insn->imm = subprog; 12237 } 12238 12239 prog->jited = 1; 12240 prog->bpf_func = func[0]->bpf_func; 12241 prog->aux->func = func; 12242 prog->aux->func_cnt = env->subprog_cnt; 12243 bpf_prog_jit_attempt_done(prog); 12244 return 0; 12245 out_free: 12246 /* We failed JIT'ing, so at this point we need to unregister poke 12247 * descriptors from subprogs, so that kernel is not attempting to 12248 * patch it anymore as we're freeing the subprog JIT memory. 12249 */ 12250 for (i = 0; i < prog->aux->size_poke_tab; i++) { 12251 map_ptr = prog->aux->poke_tab[i].tail_call.map; 12252 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); 12253 } 12254 /* At this point we're guaranteed that poke descriptors are not 12255 * live anymore. We can just unlink its descriptor table as it's 12256 * released with the main prog. 12257 */ 12258 for (i = 0; i < env->subprog_cnt; i++) { 12259 if (!func[i]) 12260 continue; 12261 func[i]->aux->poke_tab = NULL; 12262 bpf_jit_free(func[i]); 12263 } 12264 kfree(func); 12265 out_undo_insn: 12266 /* cleanup main prog to be interpreted */ 12267 prog->jit_requested = 0; 12268 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 12269 if (!bpf_pseudo_call(insn)) 12270 continue; 12271 insn->off = 0; 12272 insn->imm = env->insn_aux_data[i].call_imm; 12273 } 12274 bpf_prog_jit_attempt_done(prog); 12275 return err; 12276 } 12277 12278 static int fixup_call_args(struct bpf_verifier_env *env) 12279 { 12280 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 12281 struct bpf_prog *prog = env->prog; 12282 struct bpf_insn *insn = prog->insnsi; 12283 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); 12284 int i, depth; 12285 #endif 12286 int err = 0; 12287 12288 if (env->prog->jit_requested && 12289 !bpf_prog_is_dev_bound(env->prog->aux)) { 12290 err = jit_subprogs(env); 12291 if (err == 0) 12292 return 0; 12293 if (err == -EFAULT) 12294 return err; 12295 } 12296 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 12297 if (has_kfunc_call) { 12298 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); 12299 return -EINVAL; 12300 } 12301 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { 12302 /* When JIT fails the progs with bpf2bpf calls and tail_calls 12303 * have to be rejected, since interpreter doesn't support them yet. 12304 */ 12305 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 12306 return -EINVAL; 12307 } 12308 for (i = 0; i < prog->len; i++, insn++) { 12309 if (bpf_pseudo_func(insn)) { 12310 /* When JIT fails the progs with callback calls 12311 * have to be rejected, since interpreter doesn't support them yet. 
12312 */
12313 verbose(env, "callbacks are not allowed in non-JITed programs\n");
12314 return -EINVAL;
12315 }
12316
12317 if (!bpf_pseudo_call(insn))
12318 continue;
12319 depth = get_callee_stack_depth(env, insn, i);
12320 if (depth < 0)
12321 return depth;
12322 bpf_patch_call_args(insn, depth);
12323 }
12324 err = 0;
12325 #endif
12326 return err;
12327 }
12328
12329 static int fixup_kfunc_call(struct bpf_verifier_env *env,
12330 struct bpf_insn *insn)
12331 {
12332 const struct bpf_kfunc_desc *desc;
12333
12334 /* insn->imm has the btf func_id. Replace it with
12335 * an address (relative to __bpf_call_base).
12336 */
12337 desc = find_kfunc_desc(env->prog, insn->imm);
12338 if (!desc) {
12339 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
12340 insn->imm);
12341 return -EFAULT;
12342 }
12343
12344 insn->imm = desc->imm;
12345
12346 return 0;
12347 }
12348
12349 /* Do various post-verification rewrites in a single program pass.
12350 * These rewrites simplify JIT and interpreter implementations.
12351 */
12352 static int do_misc_fixups(struct bpf_verifier_env *env)
12353 {
12354 struct bpf_prog *prog = env->prog;
12355 bool expect_blinding = bpf_jit_blinding_enabled(prog);
12356 struct bpf_insn *insn = prog->insnsi;
12357 const struct bpf_func_proto *fn;
12358 const int insn_cnt = prog->len;
12359 const struct bpf_map_ops *ops;
12360 struct bpf_insn_aux_data *aux;
12361 struct bpf_insn insn_buf[16];
12362 struct bpf_prog *new_prog;
12363 struct bpf_map *map_ptr;
12364 int i, ret, cnt, delta = 0;
12365
12366 for (i = 0; i < insn_cnt; i++, insn++) {
12367 /* Make divide-by-zero exceptions impossible. */
12368 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
12369 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
12370 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
12371 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
12372 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
12373 bool isdiv = BPF_OP(insn->code) == BPF_DIV;
12374 struct bpf_insn *patchlet;
12375 struct bpf_insn chk_and_div[] = {
12376 /* [R,W]x div 0 -> 0 */
12377 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
12378 BPF_JNE | BPF_K, insn->src_reg,
12379 0, 2, 0),
12380 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
12381 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12382 *insn,
12383 };
12384 struct bpf_insn chk_and_mod[] = {
12385 /* [R,W]x mod 0 -> [R,W]x */
12386 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
12387 BPF_JEQ | BPF_K, insn->src_reg,
12388 0, 1 + (is64 ? 0 : 1), 0),
12389 *insn,
12390 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12391 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
12392 };
12393
12394 patchlet = isdiv ? chk_and_div : chk_and_mod;
12395 cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
12396 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
12397
12398 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
12399 if (!new_prog)
12400 return -ENOMEM;
12401
12402 delta += cnt - 1;
12403 env->prog = prog = new_prog;
12404 insn = new_prog->insnsi + i + delta;
12405 continue;
12406 }
12407
12408 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type.
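For example, for an sk_buff-based program type a hypothetical
'r0 = *(u8 *)skb[off]' (LD_ABS) is expanded by the type's gen_ld_abs()
callback into a checked packet read via an internal helper call (a
sketch; the exact emitted sequence is program type specific).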
*/ 12409 if (BPF_CLASS(insn->code) == BPF_LD && 12410 (BPF_MODE(insn->code) == BPF_ABS || 12411 BPF_MODE(insn->code) == BPF_IND)) { 12412 cnt = env->ops->gen_ld_abs(insn, insn_buf); 12413 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 12414 verbose(env, "bpf verifier is misconfigured\n"); 12415 return -EINVAL; 12416 } 12417 12418 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 12419 if (!new_prog) 12420 return -ENOMEM; 12421 12422 delta += cnt - 1; 12423 env->prog = prog = new_prog; 12424 insn = new_prog->insnsi + i + delta; 12425 continue; 12426 } 12427 12428 /* Rewrite pointer arithmetic to mitigate speculation attacks. */ 12429 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 12430 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 12431 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 12432 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 12433 struct bpf_insn *patch = &insn_buf[0]; 12434 bool issrc, isneg, isimm; 12435 u32 off_reg; 12436 12437 aux = &env->insn_aux_data[i + delta]; 12438 if (!aux->alu_state || 12439 aux->alu_state == BPF_ALU_NON_POINTER) 12440 continue; 12441 12442 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 12443 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 12444 BPF_ALU_SANITIZE_SRC; 12445 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; 12446 12447 off_reg = issrc ? insn->src_reg : insn->dst_reg; 12448 if (isimm) { 12449 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 12450 } else { 12451 if (isneg) 12452 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 12453 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 12454 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 12455 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 12456 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 12457 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 12458 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); 12459 } 12460 if (!issrc) 12461 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); 12462 insn->src_reg = BPF_REG_AX; 12463 if (isneg) 12464 insn->code = insn->code == code_add ? 12465 code_sub : code_add; 12466 *patch++ = *insn; 12467 if (issrc && isneg && !isimm) 12468 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 12469 cnt = patch - insn_buf; 12470 12471 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 12472 if (!new_prog) 12473 return -ENOMEM; 12474 12475 delta += cnt - 1; 12476 env->prog = prog = new_prog; 12477 insn = new_prog->insnsi + i + delta; 12478 continue; 12479 } 12480 12481 if (insn->code != (BPF_JMP | BPF_CALL)) 12482 continue; 12483 if (insn->src_reg == BPF_PSEUDO_CALL) 12484 continue; 12485 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 12486 ret = fixup_kfunc_call(env, insn); 12487 if (ret) 12488 return ret; 12489 continue; 12490 } 12491 12492 if (insn->imm == BPF_FUNC_get_route_realm) 12493 prog->dst_needed = 1; 12494 if (insn->imm == BPF_FUNC_get_prandom_u32) 12495 bpf_user_rnd_init_once(); 12496 if (insn->imm == BPF_FUNC_override_return) 12497 prog->kprobe_override = 1; 12498 if (insn->imm == BPF_FUNC_tail_call) { 12499 /* If we tail call into other programs, we 12500 * cannot make any assumptions since they can 12501 * be replaced dynamically during runtime in 12502 * the program array. 
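* (this is why prog->cb_access is set below, and why stack_depth is
* pessimistically raised to MAX_BPF_STACK when tail calls from
* subprograms are not allowed)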
12503 */ 12504 prog->cb_access = 1; 12505 if (!allow_tail_call_in_subprogs(env)) 12506 prog->aux->stack_depth = MAX_BPF_STACK; 12507 prog->aux->max_pkt_offset = MAX_PACKET_OFF; 12508 12509 /* mark bpf_tail_call as different opcode to avoid 12510 * conditional branch in the interpreter for every normal 12511 * call and to prevent accidental JITing by JIT compiler 12512 * that doesn't support bpf_tail_call yet 12513 */ 12514 insn->imm = 0; 12515 insn->code = BPF_JMP | BPF_TAIL_CALL; 12516 12517 aux = &env->insn_aux_data[i + delta]; 12518 if (env->bpf_capable && !expect_blinding && 12519 prog->jit_requested && 12520 !bpf_map_key_poisoned(aux) && 12521 !bpf_map_ptr_poisoned(aux) && 12522 !bpf_map_ptr_unpriv(aux)) { 12523 struct bpf_jit_poke_descriptor desc = { 12524 .reason = BPF_POKE_REASON_TAIL_CALL, 12525 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 12526 .tail_call.key = bpf_map_key_immediate(aux), 12527 .insn_idx = i + delta, 12528 }; 12529 12530 ret = bpf_jit_add_poke_descriptor(prog, &desc); 12531 if (ret < 0) { 12532 verbose(env, "adding tail call poke descriptor failed\n"); 12533 return ret; 12534 } 12535 12536 insn->imm = ret + 1; 12537 continue; 12538 } 12539 12540 if (!bpf_map_ptr_unpriv(aux)) 12541 continue; 12542 12543 /* instead of changing every JIT dealing with tail_call 12544 * emit two extra insns: 12545 * if (index >= max_entries) goto out; 12546 * index &= array->index_mask; 12547 * to avoid out-of-bounds cpu speculation 12548 */ 12549 if (bpf_map_ptr_poisoned(aux)) { 12550 verbose(env, "tail_call abusing map_ptr\n"); 12551 return -EINVAL; 12552 } 12553 12554 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 12555 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 12556 map_ptr->max_entries, 2); 12557 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 12558 container_of(map_ptr, 12559 struct bpf_array, 12560 map)->index_mask); 12561 insn_buf[2] = *insn; 12562 cnt = 3; 12563 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 12564 if (!new_prog) 12565 return -ENOMEM; 12566 12567 delta += cnt - 1; 12568 env->prog = prog = new_prog; 12569 insn = new_prog->insnsi + i + delta; 12570 continue; 12571 } 12572 12573 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 12574 * and other inlining handlers are currently limited to 64 bit 12575 * only. 
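*
* For example, for an array map map_gen_lookup() may emit roughly
* (a sketch):
*   if (index >= array->map.max_entries) r0 = NULL
*   else r0 = array->value + index * round_up(map->value_size, 8)
* which avoids the helper call on the hot path entirely.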
12576 */ 12577 if (prog->jit_requested && BITS_PER_LONG == 64 && 12578 (insn->imm == BPF_FUNC_map_lookup_elem || 12579 insn->imm == BPF_FUNC_map_update_elem || 12580 insn->imm == BPF_FUNC_map_delete_elem || 12581 insn->imm == BPF_FUNC_map_push_elem || 12582 insn->imm == BPF_FUNC_map_pop_elem || 12583 insn->imm == BPF_FUNC_map_peek_elem || 12584 insn->imm == BPF_FUNC_redirect_map)) { 12585 aux = &env->insn_aux_data[i + delta]; 12586 if (bpf_map_ptr_poisoned(aux)) 12587 goto patch_call_imm; 12588 12589 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 12590 ops = map_ptr->ops; 12591 if (insn->imm == BPF_FUNC_map_lookup_elem && 12592 ops->map_gen_lookup) { 12593 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 12594 if (cnt == -EOPNOTSUPP) 12595 goto patch_map_ops_generic; 12596 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 12597 verbose(env, "bpf verifier is misconfigured\n"); 12598 return -EINVAL; 12599 } 12600 12601 new_prog = bpf_patch_insn_data(env, i + delta, 12602 insn_buf, cnt); 12603 if (!new_prog) 12604 return -ENOMEM; 12605 12606 delta += cnt - 1; 12607 env->prog = prog = new_prog; 12608 insn = new_prog->insnsi + i + delta; 12609 continue; 12610 } 12611 12612 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 12613 (void *(*)(struct bpf_map *map, void *key))NULL)); 12614 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 12615 (int (*)(struct bpf_map *map, void *key))NULL)); 12616 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 12617 (int (*)(struct bpf_map *map, void *key, void *value, 12618 u64 flags))NULL)); 12619 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 12620 (int (*)(struct bpf_map *map, void *value, 12621 u64 flags))NULL)); 12622 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 12623 (int (*)(struct bpf_map *map, void *value))NULL)); 12624 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 12625 (int (*)(struct bpf_map *map, void *value))NULL)); 12626 BUILD_BUG_ON(!__same_type(ops->map_redirect, 12627 (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL)); 12628 12629 patch_map_ops_generic: 12630 switch (insn->imm) { 12631 case BPF_FUNC_map_lookup_elem: 12632 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - 12633 __bpf_call_base; 12634 continue; 12635 case BPF_FUNC_map_update_elem: 12636 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - 12637 __bpf_call_base; 12638 continue; 12639 case BPF_FUNC_map_delete_elem: 12640 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - 12641 __bpf_call_base; 12642 continue; 12643 case BPF_FUNC_map_push_elem: 12644 insn->imm = BPF_CAST_CALL(ops->map_push_elem) - 12645 __bpf_call_base; 12646 continue; 12647 case BPF_FUNC_map_pop_elem: 12648 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - 12649 __bpf_call_base; 12650 continue; 12651 case BPF_FUNC_map_peek_elem: 12652 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - 12653 __bpf_call_base; 12654 continue; 12655 case BPF_FUNC_redirect_map: 12656 insn->imm = BPF_CAST_CALL(ops->map_redirect) - 12657 __bpf_call_base; 12658 continue; 12659 } 12660 12661 goto patch_call_imm; 12662 } 12663 12664 /* Implement bpf_jiffies64 inline. 
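The helper call is
replaced with the two-part patch built below, a direct read of the
jiffies variable:
  r0 = (u64)&jiffies      // BPF_LD_IMM64
  r0 = *(u64 *)(r0 + 0)   // BPF_LDX_MEM(BPF_DW, ...)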
*/ 12665 if (prog->jit_requested && BITS_PER_LONG == 64 && 12666 insn->imm == BPF_FUNC_jiffies64) { 12667 struct bpf_insn ld_jiffies_addr[2] = { 12668 BPF_LD_IMM64(BPF_REG_0, 12669 (unsigned long)&jiffies), 12670 }; 12671 12672 insn_buf[0] = ld_jiffies_addr[0]; 12673 insn_buf[1] = ld_jiffies_addr[1]; 12674 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 12675 BPF_REG_0, 0); 12676 cnt = 3; 12677 12678 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 12679 cnt); 12680 if (!new_prog) 12681 return -ENOMEM; 12682 12683 delta += cnt - 1; 12684 env->prog = prog = new_prog; 12685 insn = new_prog->insnsi + i + delta; 12686 continue; 12687 } 12688 12689 patch_call_imm: 12690 fn = env->ops->get_func_proto(insn->imm, env->prog); 12691 /* all functions that have prototype and verifier allowed 12692 * programs to call them, must be real in-kernel functions 12693 */ 12694 if (!fn->func) { 12695 verbose(env, 12696 "kernel subsystem misconfigured func %s#%d\n", 12697 func_id_name(insn->imm), insn->imm); 12698 return -EFAULT; 12699 } 12700 insn->imm = fn->func - __bpf_call_base; 12701 } 12702 12703 /* Since poke tab is now finalized, publish aux to tracker. */ 12704 for (i = 0; i < prog->aux->size_poke_tab; i++) { 12705 map_ptr = prog->aux->poke_tab[i].tail_call.map; 12706 if (!map_ptr->ops->map_poke_track || 12707 !map_ptr->ops->map_poke_untrack || 12708 !map_ptr->ops->map_poke_run) { 12709 verbose(env, "bpf verifier is misconfigured\n"); 12710 return -EINVAL; 12711 } 12712 12713 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 12714 if (ret < 0) { 12715 verbose(env, "tracking tail call prog failed\n"); 12716 return ret; 12717 } 12718 } 12719 12720 sort_kfunc_descs_by_imm(env->prog); 12721 12722 return 0; 12723 } 12724 12725 static void free_states(struct bpf_verifier_env *env) 12726 { 12727 struct bpf_verifier_state_list *sl, *sln; 12728 int i; 12729 12730 sl = env->free_list; 12731 while (sl) { 12732 sln = sl->next; 12733 free_verifier_state(&sl->state, false); 12734 kfree(sl); 12735 sl = sln; 12736 } 12737 env->free_list = NULL; 12738 12739 if (!env->explored_states) 12740 return; 12741 12742 for (i = 0; i < state_htab_size(env); i++) { 12743 sl = env->explored_states[i]; 12744 12745 while (sl) { 12746 sln = sl->next; 12747 free_verifier_state(&sl->state, false); 12748 kfree(sl); 12749 sl = sln; 12750 } 12751 env->explored_states[i] = NULL; 12752 } 12753 } 12754 12755 /* The verifier is using insn_aux_data[] to store temporary data during 12756 * verification and to store information for passes that run after the 12757 * verification like dead code sanitization. do_check_common() for subprogram N 12758 * may analyze many other subprograms. sanitize_insn_aux_data() clears all 12759 * temporary data after do_check_common() finds that subprogram N cannot be 12760 * verified independently. pass_cnt counts the number of times 12761 * do_check_common() was run and insn->aux->seen tells the pass number 12762 * insn_aux_data was touched. These variables are compared to clear temporary 12763 * data from failed pass. For testing and experiments do_check_common() can be 12764 * run multiple times even when prior attempt to verify is unsuccessful. 12765 * 12766 * Note that special handling is needed on !env->bypass_spec_v1 if this is 12767 * ever called outside of error path with subsequent program rejection. 
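*
* For example, if do_check_common() runs as pass 2 and fails, only
* aux entries with seen == 2 were touched by that failed pass and get
* cleared below; entries recorded by earlier successful passes are
* preserved.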
12768 */ 12769 static void sanitize_insn_aux_data(struct bpf_verifier_env *env) 12770 { 12771 struct bpf_insn *insn = env->prog->insnsi; 12772 struct bpf_insn_aux_data *aux; 12773 int i, class; 12774 12775 for (i = 0; i < env->prog->len; i++) { 12776 class = BPF_CLASS(insn[i].code); 12777 if (class != BPF_LDX && class != BPF_STX) 12778 continue; 12779 aux = &env->insn_aux_data[i]; 12780 if (aux->seen != env->pass_cnt) 12781 continue; 12782 memset(aux, 0, offsetof(typeof(*aux), orig_idx)); 12783 } 12784 } 12785 12786 static int do_check_common(struct bpf_verifier_env *env, int subprog) 12787 { 12788 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 12789 struct bpf_verifier_state *state; 12790 struct bpf_reg_state *regs; 12791 int ret, i; 12792 12793 env->prev_linfo = NULL; 12794 env->pass_cnt++; 12795 12796 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 12797 if (!state) 12798 return -ENOMEM; 12799 state->curframe = 0; 12800 state->speculative = false; 12801 state->branches = 1; 12802 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 12803 if (!state->frame[0]) { 12804 kfree(state); 12805 return -ENOMEM; 12806 } 12807 env->cur_state = state; 12808 init_func_state(env, state->frame[0], 12809 BPF_MAIN_FUNC /* callsite */, 12810 0 /* frameno */, 12811 subprog); 12812 12813 regs = state->frame[state->curframe]->regs; 12814 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 12815 ret = btf_prepare_func_args(env, subprog, regs); 12816 if (ret) 12817 goto out; 12818 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 12819 if (regs[i].type == PTR_TO_CTX) 12820 mark_reg_known_zero(env, regs, i); 12821 else if (regs[i].type == SCALAR_VALUE) 12822 mark_reg_unknown(env, regs, i); 12823 else if (regs[i].type == PTR_TO_MEM_OR_NULL) { 12824 const u32 mem_size = regs[i].mem_size; 12825 12826 mark_reg_known_zero(env, regs, i); 12827 regs[i].mem_size = mem_size; 12828 regs[i].id = ++env->id_gen; 12829 } 12830 } 12831 } else { 12832 /* 1st arg to a function */ 12833 regs[BPF_REG_1].type = PTR_TO_CTX; 12834 mark_reg_known_zero(env, regs, BPF_REG_1); 12835 ret = btf_check_subprog_arg_match(env, subprog, regs); 12836 if (ret == -EFAULT) 12837 /* unlikely verifier bug. abort. 12838 * ret == 0 and ret < 0 are sadly acceptable for 12839 * main() function due to backward compatibility. 12840 * Like socket filter program may be written as: 12841 * int bpf_prog(struct pt_regs *ctx) 12842 * and never dereference that ctx in the program. 12843 * 'struct pt_regs' is a type mismatch for socket 12844 * filter that should be using 'struct __sk_buff'. 12845 */ 12846 goto out; 12847 } 12848 12849 ret = do_check(env); 12850 out: 12851 /* check for NULL is necessary, since cur_state can be freed inside 12852 * do_check() under memory pressure. 12853 */ 12854 if (env->cur_state) { 12855 free_verifier_state(env->cur_state, true); 12856 env->cur_state = NULL; 12857 } 12858 while (!pop_stack(env, NULL, NULL, false)); 12859 if (!ret && pop_log) 12860 bpf_vlog_reset(&env->log, 0); 12861 free_states(env); 12862 if (ret) 12863 /* clean aux data in case subprog was rejected */ 12864 sanitize_insn_aux_data(env); 12865 return ret; 12866 } 12867 12868 /* Verify all global functions in a BPF program one by one based on their BTF. 12869 * All global functions must pass verification. Otherwise the whole program is rejected. 12870 * Consider: 12871 * int bar(int); 12872 * int foo(int f) 12873 * { 12874 * return bar(f); 12875 * } 12876 * int bar(int b) 12877 * { 12878 * ... 
12879 * } 12880 * foo() will be verified first for R1=any_scalar_value. During verification it 12881 * will be assumed that bar() already verified successfully and call to bar() 12882 * from foo() will be checked for type match only. Later bar() will be verified 12883 * independently to check that it's safe for R1=any_scalar_value. 12884 */ 12885 static int do_check_subprogs(struct bpf_verifier_env *env) 12886 { 12887 struct bpf_prog_aux *aux = env->prog->aux; 12888 int i, ret; 12889 12890 if (!aux->func_info) 12891 return 0; 12892 12893 for (i = 1; i < env->subprog_cnt; i++) { 12894 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 12895 continue; 12896 env->insn_idx = env->subprog_info[i].start; 12897 WARN_ON_ONCE(env->insn_idx == 0); 12898 ret = do_check_common(env, i); 12899 if (ret) { 12900 return ret; 12901 } else if (env->log.level & BPF_LOG_LEVEL) { 12902 verbose(env, 12903 "Func#%d is safe for any args that match its prototype\n", 12904 i); 12905 } 12906 } 12907 return 0; 12908 } 12909 12910 static int do_check_main(struct bpf_verifier_env *env) 12911 { 12912 int ret; 12913 12914 env->insn_idx = 0; 12915 ret = do_check_common(env, 0); 12916 if (!ret) 12917 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 12918 return ret; 12919 } 12920 12921 12922 static void print_verification_stats(struct bpf_verifier_env *env) 12923 { 12924 int i; 12925 12926 if (env->log.level & BPF_LOG_STATS) { 12927 verbose(env, "verification time %lld usec\n", 12928 div_u64(env->verification_time, 1000)); 12929 verbose(env, "stack depth "); 12930 for (i = 0; i < env->subprog_cnt; i++) { 12931 u32 depth = env->subprog_info[i].stack_depth; 12932 12933 verbose(env, "%d", depth); 12934 if (i + 1 < env->subprog_cnt) 12935 verbose(env, "+"); 12936 } 12937 verbose(env, "\n"); 12938 } 12939 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 12940 "total_states %d peak_states %d mark_read %d\n", 12941 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 12942 env->max_states_per_insn, env->total_states, 12943 env->peak_states, env->longest_mark_read_walk); 12944 } 12945 12946 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 12947 { 12948 const struct btf_type *t, *func_proto; 12949 const struct bpf_struct_ops *st_ops; 12950 const struct btf_member *member; 12951 struct bpf_prog *prog = env->prog; 12952 u32 btf_id, member_idx; 12953 const char *mname; 12954 12955 if (!prog->gpl_compatible) { 12956 verbose(env, "struct ops programs must have a GPL compatible license\n"); 12957 return -EINVAL; 12958 } 12959 12960 btf_id = prog->aux->attach_btf_id; 12961 st_ops = bpf_struct_ops_find(btf_id); 12962 if (!st_ops) { 12963 verbose(env, "attach_btf_id %u is not a supported struct\n", 12964 btf_id); 12965 return -ENOTSUPP; 12966 } 12967 12968 t = st_ops->type; 12969 member_idx = prog->expected_attach_type; 12970 if (member_idx >= btf_type_vlen(t)) { 12971 verbose(env, "attach to invalid member idx %u of struct %s\n", 12972 member_idx, st_ops->name); 12973 return -EINVAL; 12974 } 12975 12976 member = &btf_type_member(t)[member_idx]; 12977 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 12978 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 12979 NULL); 12980 if (!func_proto) { 12981 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 12982 mname, member_idx, st_ops->name); 12983 return -EINVAL; 12984 } 12985 12986 if (st_ops->check_member) { 12987 int err = st_ops->check_member(t, member); 12988 12989 if (err) { 12990 verbose(env, 
"attach to unsupported member %s of struct %s\n", 12991 mname, st_ops->name); 12992 return err; 12993 } 12994 } 12995 12996 prog->aux->attach_func_proto = func_proto; 12997 prog->aux->attach_func_name = mname; 12998 env->ops = st_ops->verifier_ops; 12999 13000 return 0; 13001 } 13002 #define SECURITY_PREFIX "security_" 13003 13004 static int check_attach_modify_return(unsigned long addr, const char *func_name) 13005 { 13006 if (within_error_injection_list(addr) || 13007 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 13008 return 0; 13009 13010 return -EINVAL; 13011 } 13012 13013 /* list of non-sleepable functions that are otherwise on 13014 * ALLOW_ERROR_INJECTION list 13015 */ 13016 BTF_SET_START(btf_non_sleepable_error_inject) 13017 /* Three functions below can be called from sleepable and non-sleepable context. 13018 * Assume non-sleepable from bpf safety point of view. 13019 */ 13020 BTF_ID(func, __add_to_page_cache_locked) 13021 BTF_ID(func, should_fail_alloc_page) 13022 BTF_ID(func, should_failslab) 13023 BTF_SET_END(btf_non_sleepable_error_inject) 13024 13025 static int check_non_sleepable_error_inject(u32 btf_id) 13026 { 13027 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 13028 } 13029 13030 int bpf_check_attach_target(struct bpf_verifier_log *log, 13031 const struct bpf_prog *prog, 13032 const struct bpf_prog *tgt_prog, 13033 u32 btf_id, 13034 struct bpf_attach_target_info *tgt_info) 13035 { 13036 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 13037 const char prefix[] = "btf_trace_"; 13038 int ret = 0, subprog = -1, i; 13039 const struct btf_type *t; 13040 bool conservative = true; 13041 const char *tname; 13042 struct btf *btf; 13043 long addr = 0; 13044 13045 if (!btf_id) { 13046 bpf_log(log, "Tracing programs must provide btf_id\n"); 13047 return -EINVAL; 13048 } 13049 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 13050 if (!btf) { 13051 bpf_log(log, 13052 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 13053 return -EINVAL; 13054 } 13055 t = btf_type_by_id(btf, btf_id); 13056 if (!t) { 13057 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 13058 return -EINVAL; 13059 } 13060 tname = btf_name_by_offset(btf, t->name_off); 13061 if (!tname) { 13062 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 13063 return -EINVAL; 13064 } 13065 if (tgt_prog) { 13066 struct bpf_prog_aux *aux = tgt_prog->aux; 13067 13068 for (i = 0; i < aux->func_info_cnt; i++) 13069 if (aux->func_info[i].type_id == btf_id) { 13070 subprog = i; 13071 break; 13072 } 13073 if (subprog == -1) { 13074 bpf_log(log, "Subprog %s doesn't exist\n", tname); 13075 return -EINVAL; 13076 } 13077 conservative = aux->func_info_aux[subprog].unreliable; 13078 if (prog_extension) { 13079 if (conservative) { 13080 bpf_log(log, 13081 "Cannot replace static functions\n"); 13082 return -EINVAL; 13083 } 13084 if (!prog->jit_requested) { 13085 bpf_log(log, 13086 "Extension programs should be JITed\n"); 13087 return -EINVAL; 13088 } 13089 } 13090 if (!tgt_prog->jited) { 13091 bpf_log(log, "Can attach to only JITed progs\n"); 13092 return -EINVAL; 13093 } 13094 if (tgt_prog->type == prog->type) { 13095 /* Cannot fentry/fexit another fentry/fexit program. 13096 * Cannot attach program extension to another extension. 13097 * It's ok to attach fentry/fexit to extension program. 
			 */
			bpf_log(log, "Cannot recursively attach\n");
			return -EINVAL;
		}
		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
		    prog_extension &&
		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
			/* Program extensions can extend all program types
			 * except fentry/fexit, for the following reason.
			 * fentry/fexit programs are used for performance
			 * analysis and stats, and can be attached to any
			 * program type except themselves. When an extension
			 * program replaces an XDP function, performance
			 * analysis of all functions must remain possible:
			 * both the original XDP program and its program
			 * extension. Hence attaching fentry/fexit to
			 * BPF_PROG_TYPE_EXT is allowed. If extending
			 * fentry/fexit were allowed, it would be possible to
			 * create a long call chain
			 * fentry->extension->fentry->extension beyond a
			 * reasonable stack size. Hence extending fentry is
			 * not allowed.
			 */
			bpf_log(log, "Cannot extend fentry/fexit\n");
			return -EINVAL;
		}
	} else {
		if (prog_extension) {
			bpf_log(log, "Cannot replace kernel functions\n");
			return -EINVAL;
		}
	}

	switch (prog->expected_attach_type) {
	case BPF_TRACE_RAW_TP:
		if (tgt_prog) {
			bpf_log(log,
				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
			return -EINVAL;
		}
		if (!btf_type_is_typedef(t)) {
			bpf_log(log, "attach_btf_id %u is not a typedef\n",
				btf_id);
			return -EINVAL;
		}
		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
				btf_id, tname);
			return -EINVAL;
		}
		tname += sizeof(prefix) - 1;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_ptr(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			/* should never happen in valid vmlinux build */
			return -EINVAL;

		break;
	case BPF_TRACE_ITER:
		if (!btf_type_is_func(t)) {
			bpf_log(log, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;
		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
		if (ret)
			return ret;
		break;
	default:
		if (!prog_extension)
			return -EINVAL;
		fallthrough;
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {
			bpf_log(log, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		if (prog_extension &&
		    btf_check_type_match(log, prog, btf, t))
			return -EINVAL;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;

		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
			return -EINVAL;

		if (tgt_prog && conservative)
			t = NULL;

		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
		if (ret < 0)
			return ret;

		if (tgt_prog) {
			if (subprog == 0)
				addr = (long) tgt_prog->bpf_func;
			else
				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
		} else {
			addr = kallsyms_lookup_name(tname);
			if (!addr) {
				bpf_log(log,
					"The address of function %s cannot be found\n",
					tname);
				return -ENOENT;
			}
		}

		if (prog->aux->sleepable) {
			ret = -EINVAL;
			switch (prog->type) {
			case BPF_PROG_TYPE_TRACING:
				/* fentry/fexit/fmod_ret progs can be sleepable
				 * only if they are attached to a function on
				 * the ALLOW_ERROR_INJECTION list and are not
				 * on the non-sleepable denylist.
				 */
				if (!check_non_sleepable_error_inject(btf_id) &&
				    within_error_injection_list(addr))
					ret = 0;
				break;
			case BPF_PROG_TYPE_LSM:
				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
				 * Only some of them are sleepable.
				 */
				if (bpf_lsm_is_sleepable_hook(btf_id))
					ret = 0;
				break;
			default:
				break;
			}
			if (ret) {
				bpf_log(log, "%s is not sleepable\n", tname);
				return ret;
			}
		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
			if (tgt_prog) {
				bpf_log(log, "can't modify return codes of BPF programs\n");
				return -EINVAL;
			}
			ret = check_attach_modify_return(addr, tname);
			if (ret) {
				bpf_log(log, "%s() is not modifiable\n", tname);
				return ret;
			}
		}

		break;
	}
	tgt_info->tgt_addr = addr;
	tgt_info->tgt_name = tname;
	tgt_info->tgt_type = t;
	return 0;
}

BTF_SET_START(btf_id_deny)
BTF_ID_UNUSED
#ifdef CONFIG_SMP
BTF_ID(func, migrate_disable)
BTF_ID(func, migrate_enable)
#endif
#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
BTF_ID(func, rcu_read_unlock_strict)
#endif
BTF_SET_END(btf_id_deny)
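
/* Illustrative note, not authoritative: for a hypothetical fentry attach to
 * tcp_v4_connect() with tgt_prog == NULL, bpf_check_attach_target() resolves
 * the address via kallsyms_lookup_name() and returns it in tgt_info.tgt_addr,
 * which check_attach_btf_id() below feeds into bpf_trampoline_get(). The
 * btf_id_deny set above blocks tracing attachment to functions such as
 * migrate_disable() that the trampoline enter/exit path itself relies on,
 * where recursive attachment could not be handled safely.
 */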

static int check_attach_btf_id(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
	struct bpf_attach_target_info tgt_info = {};
	u32 btf_id = prog->aux->attach_btf_id;
	struct bpf_trampoline *tr;
	int ret;
	u64 key;

	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
		if (prog->aux->sleepable)
			/* attach_btf_id checked to be zero already */
			return 0;
		verbose(env, "Syscall programs can only be sleepable\n");
		return -EINVAL;
	}

	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
	    prog->type != BPF_PROG_TYPE_LSM) {
		verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
		return -EINVAL;
	}

	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
		return check_struct_ops_btf_id(env);

	if (prog->type != BPF_PROG_TYPE_TRACING &&
	    prog->type != BPF_PROG_TYPE_LSM &&
	    prog->type != BPF_PROG_TYPE_EXT)
		return 0;

	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
	if (ret)
		return ret;

	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
		/* to make freplace programs equivalent to their targets, they
		 * need to inherit env->ops and expected_attach_type for the
		 * rest of verification
		 */
		env->ops = bpf_verifier_ops[tgt_prog->type];
		prog->expected_attach_type = tgt_prog->expected_attach_type;
	}

	/* store info about the attachment target that will be used later */
	prog->aux->attach_func_proto = tgt_info.tgt_type;
	prog->aux->attach_func_name = tgt_info.tgt_name;

	if (tgt_prog) {
		prog->aux->saved_dst_prog_type = tgt_prog->type;
		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
	}

	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
		prog->aux->attach_btf_trace = true;
		return 0;
	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
		if (!bpf_iter_prog_supported(prog))
			return -EINVAL;
		return 0;
	}

	if (prog->type == BPF_PROG_TYPE_LSM) {
		ret = bpf_lsm_verify_prog(&env->log, prog);
		if (ret < 0)
			return ret;
	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
		   btf_id_set_contains(&btf_id_deny, btf_id)) {
		return -EINVAL;
	}

	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	prog->aux->dst_trampoline = tr;
	return 0;
}

struct btf *bpf_get_btf_vmlinux(void)
{
	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
		mutex_lock(&bpf_verifier_lock);
		if (!btf_vmlinux)
			btf_vmlinux = btf_parse_vmlinux();
		mutex_unlock(&bpf_verifier_lock);
	}
	return btf_vmlinux;
}
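
/* A minimal usage sketch (illustrative pseudo-code, not kernel code): a
 * loader that wants the verification trace must fill the log triple
 * consistently before the load, e.g.
 *
 *	attr.log_level = 1;
 *	attr.log_size  = 1 << 20;	// must be >= 128 and <= UINT_MAX >> 2
 *	attr.log_buf   = (u64)(unsigned long)buf;
 *
 * bpf_check() below returns -EINVAL when only some of the three are set,
 * or when they are out of range (see the "log attributes have to be sane"
 * check).
 */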

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
{
	u64 start_time = ktime_get_ns();
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int i, len, ret = -EINVAL;
	bool is_priv;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	len = (*prog)->len;
	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	for (i = 0; i < len; i++)
		env->insn_aux_data[i].orig_idx = i;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];
	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
	is_priv = bpf_capable();

	bpf_get_btf_vmlinux();

	/* grab the mutex to protect a few globals used by the verifier */
	if (!is_priv)
		mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied a buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
		    !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
			goto err_unlock;
	}

	if (IS_ERR(btf_vmlinux)) {
		/* Either gcc, pahole, or the kernel is broken. */
		verbose(env, "in-kernel BTF is malformed\n");
		ret = PTR_ERR(btf_vmlinux);
		goto skip_full_check;
	}

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
	env->allow_uninit_stack = bpf_allow_uninit_stack();
	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
	env->bypass_spec_v1 = bpf_bypass_spec_v1();
	env->bypass_spec_v4 = bpf_bypass_spec_v4();
	env->bpf_capable = bpf_capable();

	if (is_priv)
		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;

	env->explored_states = kvcalloc(state_htab_size(env),
					sizeof(struct bpf_verifier_state_list *),
					GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = add_subprog_and_kfunc(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_attach_btf_id(env);
	if (ret)
		goto skip_full_check;

	ret = resolve_pseudo_ldimm64(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check_subprogs(env);
	ret = ret ?: do_check_main(env);

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	kvfree(env->explored_states);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);
		if (ret == 0)
			ret = opt_remove_dead_code(env);
		if (ret == 0)
			ret = opt_remove_nops(env);
	} else {
		if (ret == 0)
			sanitize_dead_code(env);
	}

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = do_misc_fixups(env);

	/* do the 32-bit optimization after insn patching has finished, so
	 * that the patched insns are handled correctly as well
	 */
	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ?
						!ret : false;
	}

	if (ret == 0)
		ret = fixup_call_args(env);

	env->verification_time = ktime_get_ns() - start_time;
	print_verification_stats(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret)
		goto err_release_maps;

	if (env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;
	}
	if (env->used_btf_cnt) {
		/* if program passed verifier, update used_btfs in bpf_prog_aux */
		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
							  sizeof(env->used_btfs[0]),
							  GFP_KERNEL);
		if (!env->prog->aux->used_btfs) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_btfs, env->used_btfs,
		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
	}
	if (env->used_map_cnt || env->used_btf_cnt) {
		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

	adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	if (!env->prog->aux->used_btfs)
		release_btfs(env);

	/* extension progs temporarily inherit the attach_type of their targets
	 * for verification purposes, so set it back to zero before returning
	 */
	if (env->prog->type == BPF_PROG_TYPE_EXT)
		env->prog->expected_attach_type = 0;

	*prog = env->prog;
err_unlock:
	if (!is_priv)
		mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}