1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 * Copyright (c) 2016 Facebook 4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io 5 */ 6 #include <uapi/linux/btf.h> 7 #include <linux/kernel.h> 8 #include <linux/types.h> 9 #include <linux/slab.h> 10 #include <linux/bpf.h> 11 #include <linux/btf.h> 12 #include <linux/bpf_verifier.h> 13 #include <linux/filter.h> 14 #include <net/netlink.h> 15 #include <linux/file.h> 16 #include <linux/vmalloc.h> 17 #include <linux/stringify.h> 18 #include <linux/bsearch.h> 19 #include <linux/sort.h> 20 #include <linux/perf_event.h> 21 #include <linux/ctype.h> 22 #include <linux/error-injection.h> 23 #include <linux/bpf_lsm.h> 24 25 #include "disasm.h" 26 27 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { 28 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 29 [_id] = & _name ## _verifier_ops, 30 #define BPF_MAP_TYPE(_id, _ops) 31 #define BPF_LINK_TYPE(_id, _name) 32 #include <linux/bpf_types.h> 33 #undef BPF_PROG_TYPE 34 #undef BPF_MAP_TYPE 35 #undef BPF_LINK_TYPE 36 }; 37 38 /* bpf_check() is a static code analyzer that walks eBPF program 39 * instruction by instruction and updates register/stack state. 40 * All paths of conditional branches are analyzed until 'bpf_exit' insn. 41 * 42 * The first pass is depth-first-search to check that the program is a DAG. 43 * It rejects the following programs: 44 * - larger than BPF_MAXINSNS insns 45 * - if loop is present (detected via back-edge) 46 * - unreachable insns exist (shouldn't be a forest. program = one function) 47 * - out of bounds or malformed jumps 48 * The second pass is all possible path descent from the 1st insn. 49 * Since it's analyzing all pathes through the program, the length of the 50 * analysis is limited to 64k insn, which may be hit even if total number of 51 * insn is less then 4K, but there are too many branches that change stack/regs. 52 * Number of 'branches to be analyzed' is limited to 1k 53 * 54 * On entry to each instruction, each register has a type, and the instruction 55 * changes the types of the registers depending on instruction semantics. 56 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is 57 * copied to R1. 58 * 59 * All registers are 64-bit. 60 * R0 - return register 61 * R1-R5 argument passing registers 62 * R6-R9 callee saved registers 63 * R10 - frame pointer read-only 64 * 65 * At the start of BPF program the register R1 contains a pointer to bpf_context 66 * and has type PTR_TO_CTX. 67 * 68 * Verifier tracks arithmetic operations on pointers in case: 69 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 70 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), 71 * 1st insn copies R10 (which has FRAME_PTR) type into R1 72 * and 2nd arithmetic instruction is pattern matched to recognize 73 * that it wants to construct a pointer to some element within stack. 74 * So after 2nd insn, the register R1 has type PTR_TO_STACK 75 * (and -20 constant is saved for further stack bounds checking). 76 * Meaning that this reg is a pointer to stack plus known immediate constant. 77 * 78 * Most of the time the registers have SCALAR_VALUE type, which 79 * means the register has some value, but it's not a valid pointer. 80 * (like pointer plus pointer becomes SCALAR_VALUE type) 81 * 82 * When verifier sees load or store instructions the type of base register 83 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are 84 * four pointer types recognized by check_mem_access() function. 85 * 86 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' 87 * and the range of [ptr, ptr + map's value_size) is accessible. 88 * 89 * registers used to pass values to function calls are checked against 90 * function argument constraints. 91 * 92 * ARG_PTR_TO_MAP_KEY is one of such argument constraints. 93 * It means that the register type passed to this function must be 94 * PTR_TO_STACK and it will be used inside the function as 95 * 'pointer to map element key' 96 * 97 * For example the argument constraints for bpf_map_lookup_elem(): 98 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 99 * .arg1_type = ARG_CONST_MAP_PTR, 100 * .arg2_type = ARG_PTR_TO_MAP_KEY, 101 * 102 * ret_type says that this function returns 'pointer to map elem value or null' 103 * function expects 1st argument to be a const pointer to 'struct bpf_map' and 104 * 2nd argument should be a pointer to stack, which will be used inside 105 * the helper function as a pointer to map element key. 106 * 107 * On the kernel side the helper function looks like: 108 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 109 * { 110 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; 111 * void *key = (void *) (unsigned long) r2; 112 * void *value; 113 * 114 * here kernel can access 'key' and 'map' pointers safely, knowing that 115 * [key, key + map->key_size) bytes are valid and were initialized on 116 * the stack of eBPF program. 117 * } 118 * 119 * Corresponding eBPF program may look like: 120 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR 121 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK 122 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP 123 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 124 * here verifier looks at prototype of map_lookup_elem() and sees: 125 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, 126 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes 127 * 128 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, 129 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits 130 * and were initialized prior to this call. 131 * If it's ok, then verifier allows this BPF_CALL insn and looks at 132 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets 133 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function 134 * returns ether pointer to map value or NULL. 135 * 136 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' 137 * insn, the register holding that pointer in the true branch changes state to 138 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false 139 * branch. See check_cond_jmp_op(). 140 * 141 * After the call R0 is set to return type of the function and registers R1-R5 142 * are set to NOT_INIT to indicate that they are no longer readable. 143 * 144 * The following reference types represent a potential reference to a kernel 145 * resource which, after first being allocated, must be checked and freed by 146 * the BPF program: 147 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET 148 * 149 * When the verifier sees a helper call return a reference type, it allocates a 150 * pointer id for the reference and stores it in the current function state. 151 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into 152 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type 153 * passes through a NULL-check conditional. For the branch wherein the state is 154 * changed to CONST_IMM, the verifier releases the reference. 155 * 156 * For each helper function that allocates a reference, such as 157 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as 158 * bpf_sk_release(). When a reference type passes into the release function, 159 * the verifier also releases the reference. If any unchecked or unreleased 160 * reference remains at the end of the program, the verifier rejects it. 161 */ 162 163 /* verifier_state + insn_idx are pushed to stack when branch is encountered */ 164 struct bpf_verifier_stack_elem { 165 /* verifer state is 'st' 166 * before processing instruction 'insn_idx' 167 * and after processing instruction 'prev_insn_idx' 168 */ 169 struct bpf_verifier_state st; 170 int insn_idx; 171 int prev_insn_idx; 172 struct bpf_verifier_stack_elem *next; 173 /* length of verifier log at the time this state was pushed on stack */ 174 u32 log_pos; 175 }; 176 177 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192 178 #define BPF_COMPLEXITY_LIMIT_STATES 64 179 180 #define BPF_MAP_KEY_POISON (1ULL << 63) 181 #define BPF_MAP_KEY_SEEN (1ULL << 62) 182 183 #define BPF_MAP_PTR_UNPRIV 1UL 184 #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ 185 POISON_POINTER_DELTA)) 186 #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) 187 188 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) 189 { 190 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; 191 } 192 193 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) 194 { 195 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV; 196 } 197 198 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, 199 const struct bpf_map *map, bool unpriv) 200 { 201 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); 202 unpriv |= bpf_map_ptr_unpriv(aux); 203 aux->map_ptr_state = (unsigned long)map | 204 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); 205 } 206 207 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux) 208 { 209 return aux->map_key_state & BPF_MAP_KEY_POISON; 210 } 211 212 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux) 213 { 214 return !(aux->map_key_state & BPF_MAP_KEY_SEEN); 215 } 216 217 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux) 218 { 219 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); 220 } 221 222 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state) 223 { 224 bool poisoned = bpf_map_key_poisoned(aux); 225 226 aux->map_key_state = state | BPF_MAP_KEY_SEEN | 227 (poisoned ? BPF_MAP_KEY_POISON : 0ULL); 228 } 229 230 struct bpf_call_arg_meta { 231 struct bpf_map *map_ptr; 232 bool raw_mode; 233 bool pkt_access; 234 int regno; 235 int access_size; 236 int mem_size; 237 u64 msize_max_value; 238 int ref_obj_id; 239 int func_id; 240 u32 btf_id; 241 }; 242 243 struct btf *btf_vmlinux; 244 245 static DEFINE_MUTEX(bpf_verifier_lock); 246 247 static const struct bpf_line_info * 248 find_linfo(const struct bpf_verifier_env *env, u32 insn_off) 249 { 250 const struct bpf_line_info *linfo; 251 const struct bpf_prog *prog; 252 u32 i, nr_linfo; 253 254 prog = env->prog; 255 nr_linfo = prog->aux->nr_linfo; 256 257 if (!nr_linfo || insn_off >= prog->len) 258 return NULL; 259 260 linfo = prog->aux->linfo; 261 for (i = 1; i < nr_linfo; i++) 262 if (insn_off < linfo[i].insn_off) 263 break; 264 265 return &linfo[i - 1]; 266 } 267 268 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, 269 va_list args) 270 { 271 unsigned int n; 272 273 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); 274 275 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, 276 "verifier log line truncated - local buffer too short\n"); 277 278 n = min(log->len_total - log->len_used - 1, n); 279 log->kbuf[n] = '\0'; 280 281 if (log->level == BPF_LOG_KERNEL) { 282 pr_err("BPF:%s\n", log->kbuf); 283 return; 284 } 285 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) 286 log->len_used += n; 287 else 288 log->ubuf = NULL; 289 } 290 291 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos) 292 { 293 char zero = 0; 294 295 if (!bpf_verifier_log_needed(log)) 296 return; 297 298 log->len_used = new_pos; 299 if (put_user(zero, log->ubuf + new_pos)) 300 log->ubuf = NULL; 301 } 302 303 /* log_level controls verbosity level of eBPF verifier. 304 * bpf_verifier_log_write() is used to dump the verification trace to the log, 305 * so the user can figure out what's wrong with the program 306 */ 307 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, 308 const char *fmt, ...) 309 { 310 va_list args; 311 312 if (!bpf_verifier_log_needed(&env->log)) 313 return; 314 315 va_start(args, fmt); 316 bpf_verifier_vlog(&env->log, fmt, args); 317 va_end(args); 318 } 319 EXPORT_SYMBOL_GPL(bpf_verifier_log_write); 320 321 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) 322 { 323 struct bpf_verifier_env *env = private_data; 324 va_list args; 325 326 if (!bpf_verifier_log_needed(&env->log)) 327 return; 328 329 va_start(args, fmt); 330 bpf_verifier_vlog(&env->log, fmt, args); 331 va_end(args); 332 } 333 334 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log, 335 const char *fmt, ...) 336 { 337 va_list args; 338 339 if (!bpf_verifier_log_needed(log)) 340 return; 341 342 va_start(args, fmt); 343 bpf_verifier_vlog(log, fmt, args); 344 va_end(args); 345 } 346 347 static const char *ltrim(const char *s) 348 { 349 while (isspace(*s)) 350 s++; 351 352 return s; 353 } 354 355 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, 356 u32 insn_off, 357 const char *prefix_fmt, ...) 358 { 359 const struct bpf_line_info *linfo; 360 361 if (!bpf_verifier_log_needed(&env->log)) 362 return; 363 364 linfo = find_linfo(env, insn_off); 365 if (!linfo || linfo == env->prev_linfo) 366 return; 367 368 if (prefix_fmt) { 369 va_list args; 370 371 va_start(args, prefix_fmt); 372 bpf_verifier_vlog(&env->log, prefix_fmt, args); 373 va_end(args); 374 } 375 376 verbose(env, "%s\n", 377 ltrim(btf_name_by_offset(env->prog->aux->btf, 378 linfo->line_off))); 379 380 env->prev_linfo = linfo; 381 } 382 383 static bool type_is_pkt_pointer(enum bpf_reg_type type) 384 { 385 return type == PTR_TO_PACKET || 386 type == PTR_TO_PACKET_META; 387 } 388 389 static bool type_is_sk_pointer(enum bpf_reg_type type) 390 { 391 return type == PTR_TO_SOCKET || 392 type == PTR_TO_SOCK_COMMON || 393 type == PTR_TO_TCP_SOCK || 394 type == PTR_TO_XDP_SOCK; 395 } 396 397 static bool reg_type_not_null(enum bpf_reg_type type) 398 { 399 return type == PTR_TO_SOCKET || 400 type == PTR_TO_TCP_SOCK || 401 type == PTR_TO_MAP_VALUE || 402 type == PTR_TO_SOCK_COMMON; 403 } 404 405 static bool reg_type_may_be_null(enum bpf_reg_type type) 406 { 407 return type == PTR_TO_MAP_VALUE_OR_NULL || 408 type == PTR_TO_SOCKET_OR_NULL || 409 type == PTR_TO_SOCK_COMMON_OR_NULL || 410 type == PTR_TO_TCP_SOCK_OR_NULL || 411 type == PTR_TO_BTF_ID_OR_NULL || 412 type == PTR_TO_MEM_OR_NULL; 413 } 414 415 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) 416 { 417 return reg->type == PTR_TO_MAP_VALUE && 418 map_value_has_spin_lock(reg->map_ptr); 419 } 420 421 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) 422 { 423 return type == PTR_TO_SOCKET || 424 type == PTR_TO_SOCKET_OR_NULL || 425 type == PTR_TO_TCP_SOCK || 426 type == PTR_TO_TCP_SOCK_OR_NULL || 427 type == PTR_TO_MEM || 428 type == PTR_TO_MEM_OR_NULL; 429 } 430 431 static bool arg_type_may_be_refcounted(enum bpf_arg_type type) 432 { 433 return type == ARG_PTR_TO_SOCK_COMMON; 434 } 435 436 /* Determine whether the function releases some resources allocated by another 437 * function call. The first reference type argument will be assumed to be 438 * released by release_reference(). 439 */ 440 static bool is_release_function(enum bpf_func_id func_id) 441 { 442 return func_id == BPF_FUNC_sk_release || 443 func_id == BPF_FUNC_ringbuf_submit || 444 func_id == BPF_FUNC_ringbuf_discard; 445 } 446 447 static bool may_be_acquire_function(enum bpf_func_id func_id) 448 { 449 return func_id == BPF_FUNC_sk_lookup_tcp || 450 func_id == BPF_FUNC_sk_lookup_udp || 451 func_id == BPF_FUNC_skc_lookup_tcp || 452 func_id == BPF_FUNC_map_lookup_elem || 453 func_id == BPF_FUNC_ringbuf_reserve; 454 } 455 456 static bool is_acquire_function(enum bpf_func_id func_id, 457 const struct bpf_map *map) 458 { 459 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; 460 461 if (func_id == BPF_FUNC_sk_lookup_tcp || 462 func_id == BPF_FUNC_sk_lookup_udp || 463 func_id == BPF_FUNC_skc_lookup_tcp || 464 func_id == BPF_FUNC_ringbuf_reserve) 465 return true; 466 467 if (func_id == BPF_FUNC_map_lookup_elem && 468 (map_type == BPF_MAP_TYPE_SOCKMAP || 469 map_type == BPF_MAP_TYPE_SOCKHASH)) 470 return true; 471 472 return false; 473 } 474 475 static bool is_ptr_cast_function(enum bpf_func_id func_id) 476 { 477 return func_id == BPF_FUNC_tcp_sock || 478 func_id == BPF_FUNC_sk_fullsock; 479 } 480 481 /* string representation of 'enum bpf_reg_type' */ 482 static const char * const reg_type_str[] = { 483 [NOT_INIT] = "?", 484 [SCALAR_VALUE] = "inv", 485 [PTR_TO_CTX] = "ctx", 486 [CONST_PTR_TO_MAP] = "map_ptr", 487 [PTR_TO_MAP_VALUE] = "map_value", 488 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 489 [PTR_TO_STACK] = "fp", 490 [PTR_TO_PACKET] = "pkt", 491 [PTR_TO_PACKET_META] = "pkt_meta", 492 [PTR_TO_PACKET_END] = "pkt_end", 493 [PTR_TO_FLOW_KEYS] = "flow_keys", 494 [PTR_TO_SOCKET] = "sock", 495 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", 496 [PTR_TO_SOCK_COMMON] = "sock_common", 497 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", 498 [PTR_TO_TCP_SOCK] = "tcp_sock", 499 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", 500 [PTR_TO_TP_BUFFER] = "tp_buffer", 501 [PTR_TO_XDP_SOCK] = "xdp_sock", 502 [PTR_TO_BTF_ID] = "ptr_", 503 [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", 504 [PTR_TO_MEM] = "mem", 505 [PTR_TO_MEM_OR_NULL] = "mem_or_null", 506 }; 507 508 static char slot_type_char[] = { 509 [STACK_INVALID] = '?', 510 [STACK_SPILL] = 'r', 511 [STACK_MISC] = 'm', 512 [STACK_ZERO] = '0', 513 }; 514 515 static void print_liveness(struct bpf_verifier_env *env, 516 enum bpf_reg_liveness live) 517 { 518 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) 519 verbose(env, "_"); 520 if (live & REG_LIVE_READ) 521 verbose(env, "r"); 522 if (live & REG_LIVE_WRITTEN) 523 verbose(env, "w"); 524 if (live & REG_LIVE_DONE) 525 verbose(env, "D"); 526 } 527 528 static struct bpf_func_state *func(struct bpf_verifier_env *env, 529 const struct bpf_reg_state *reg) 530 { 531 struct bpf_verifier_state *cur = env->cur_state; 532 533 return cur->frame[reg->frameno]; 534 } 535 536 const char *kernel_type_name(u32 id) 537 { 538 return btf_name_by_offset(btf_vmlinux, 539 btf_type_by_id(btf_vmlinux, id)->name_off); 540 } 541 542 static void print_verifier_state(struct bpf_verifier_env *env, 543 const struct bpf_func_state *state) 544 { 545 const struct bpf_reg_state *reg; 546 enum bpf_reg_type t; 547 int i; 548 549 if (state->frameno) 550 verbose(env, " frame%d:", state->frameno); 551 for (i = 0; i < MAX_BPF_REG; i++) { 552 reg = &state->regs[i]; 553 t = reg->type; 554 if (t == NOT_INIT) 555 continue; 556 verbose(env, " R%d", i); 557 print_liveness(env, reg->live); 558 verbose(env, "=%s", reg_type_str[t]); 559 if (t == SCALAR_VALUE && reg->precise) 560 verbose(env, "P"); 561 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 562 tnum_is_const(reg->var_off)) { 563 /* reg->off should be 0 for SCALAR_VALUE */ 564 verbose(env, "%lld", reg->var_off.value + reg->off); 565 } else { 566 if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL) 567 verbose(env, "%s", kernel_type_name(reg->btf_id)); 568 verbose(env, "(id=%d", reg->id); 569 if (reg_type_may_be_refcounted_or_null(t)) 570 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); 571 if (t != SCALAR_VALUE) 572 verbose(env, ",off=%d", reg->off); 573 if (type_is_pkt_pointer(t)) 574 verbose(env, ",r=%d", reg->range); 575 else if (t == CONST_PTR_TO_MAP || 576 t == PTR_TO_MAP_VALUE || 577 t == PTR_TO_MAP_VALUE_OR_NULL) 578 verbose(env, ",ks=%d,vs=%d", 579 reg->map_ptr->key_size, 580 reg->map_ptr->value_size); 581 if (tnum_is_const(reg->var_off)) { 582 /* Typically an immediate SCALAR_VALUE, but 583 * could be a pointer whose offset is too big 584 * for reg->off 585 */ 586 verbose(env, ",imm=%llx", reg->var_off.value); 587 } else { 588 if (reg->smin_value != reg->umin_value && 589 reg->smin_value != S64_MIN) 590 verbose(env, ",smin_value=%lld", 591 (long long)reg->smin_value); 592 if (reg->smax_value != reg->umax_value && 593 reg->smax_value != S64_MAX) 594 verbose(env, ",smax_value=%lld", 595 (long long)reg->smax_value); 596 if (reg->umin_value != 0) 597 verbose(env, ",umin_value=%llu", 598 (unsigned long long)reg->umin_value); 599 if (reg->umax_value != U64_MAX) 600 verbose(env, ",umax_value=%llu", 601 (unsigned long long)reg->umax_value); 602 if (!tnum_is_unknown(reg->var_off)) { 603 char tn_buf[48]; 604 605 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 606 verbose(env, ",var_off=%s", tn_buf); 607 } 608 if (reg->s32_min_value != reg->smin_value && 609 reg->s32_min_value != S32_MIN) 610 verbose(env, ",s32_min_value=%d", 611 (int)(reg->s32_min_value)); 612 if (reg->s32_max_value != reg->smax_value && 613 reg->s32_max_value != S32_MAX) 614 verbose(env, ",s32_max_value=%d", 615 (int)(reg->s32_max_value)); 616 if (reg->u32_min_value != reg->umin_value && 617 reg->u32_min_value != U32_MIN) 618 verbose(env, ",u32_min_value=%d", 619 (int)(reg->u32_min_value)); 620 if (reg->u32_max_value != reg->umax_value && 621 reg->u32_max_value != U32_MAX) 622 verbose(env, ",u32_max_value=%d", 623 (int)(reg->u32_max_value)); 624 } 625 verbose(env, ")"); 626 } 627 } 628 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 629 char types_buf[BPF_REG_SIZE + 1]; 630 bool valid = false; 631 int j; 632 633 for (j = 0; j < BPF_REG_SIZE; j++) { 634 if (state->stack[i].slot_type[j] != STACK_INVALID) 635 valid = true; 636 types_buf[j] = slot_type_char[ 637 state->stack[i].slot_type[j]]; 638 } 639 types_buf[BPF_REG_SIZE] = 0; 640 if (!valid) 641 continue; 642 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 643 print_liveness(env, state->stack[i].spilled_ptr.live); 644 if (state->stack[i].slot_type[0] == STACK_SPILL) { 645 reg = &state->stack[i].spilled_ptr; 646 t = reg->type; 647 verbose(env, "=%s", reg_type_str[t]); 648 if (t == SCALAR_VALUE && reg->precise) 649 verbose(env, "P"); 650 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) 651 verbose(env, "%lld", reg->var_off.value + reg->off); 652 } else { 653 verbose(env, "=%s", types_buf); 654 } 655 } 656 if (state->acquired_refs && state->refs[0].id) { 657 verbose(env, " refs=%d", state->refs[0].id); 658 for (i = 1; i < state->acquired_refs; i++) 659 if (state->refs[i].id) 660 verbose(env, ",%d", state->refs[i].id); 661 } 662 verbose(env, "\n"); 663 } 664 665 #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ 666 static int copy_##NAME##_state(struct bpf_func_state *dst, \ 667 const struct bpf_func_state *src) \ 668 { \ 669 if (!src->FIELD) \ 670 return 0; \ 671 if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ 672 /* internal bug, make state invalid to reject the program */ \ 673 memset(dst, 0, sizeof(*dst)); \ 674 return -EFAULT; \ 675 } \ 676 memcpy(dst->FIELD, src->FIELD, \ 677 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ 678 return 0; \ 679 } 680 /* copy_reference_state() */ 681 COPY_STATE_FN(reference, acquired_refs, refs, 1) 682 /* copy_stack_state() */ 683 COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) 684 #undef COPY_STATE_FN 685 686 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ 687 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ 688 bool copy_old) \ 689 { \ 690 u32 old_size = state->COUNT; \ 691 struct bpf_##NAME##_state *new_##FIELD; \ 692 int slot = size / SIZE; \ 693 \ 694 if (size <= old_size || !size) { \ 695 if (copy_old) \ 696 return 0; \ 697 state->COUNT = slot * SIZE; \ 698 if (!size && old_size) { \ 699 kfree(state->FIELD); \ 700 state->FIELD = NULL; \ 701 } \ 702 return 0; \ 703 } \ 704 new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ 705 GFP_KERNEL); \ 706 if (!new_##FIELD) \ 707 return -ENOMEM; \ 708 if (copy_old) { \ 709 if (state->FIELD) \ 710 memcpy(new_##FIELD, state->FIELD, \ 711 sizeof(*new_##FIELD) * (old_size / SIZE)); \ 712 memset(new_##FIELD + old_size / SIZE, 0, \ 713 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ 714 } \ 715 state->COUNT = slot * SIZE; \ 716 kfree(state->FIELD); \ 717 state->FIELD = new_##FIELD; \ 718 return 0; \ 719 } 720 /* realloc_reference_state() */ 721 REALLOC_STATE_FN(reference, acquired_refs, refs, 1) 722 /* realloc_stack_state() */ 723 REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) 724 #undef REALLOC_STATE_FN 725 726 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to 727 * make it consume minimal amount of memory. check_stack_write() access from 728 * the program calls into realloc_func_state() to grow the stack size. 729 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state 730 * which realloc_stack_state() copies over. It points to previous 731 * bpf_verifier_state which is never reallocated. 732 */ 733 static int realloc_func_state(struct bpf_func_state *state, int stack_size, 734 int refs_size, bool copy_old) 735 { 736 int err = realloc_reference_state(state, refs_size, copy_old); 737 if (err) 738 return err; 739 return realloc_stack_state(state, stack_size, copy_old); 740 } 741 742 /* Acquire a pointer id from the env and update the state->refs to include 743 * this new pointer reference. 744 * On success, returns a valid pointer id to associate with the register 745 * On failure, returns a negative errno. 746 */ 747 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) 748 { 749 struct bpf_func_state *state = cur_func(env); 750 int new_ofs = state->acquired_refs; 751 int id, err; 752 753 err = realloc_reference_state(state, state->acquired_refs + 1, true); 754 if (err) 755 return err; 756 id = ++env->id_gen; 757 state->refs[new_ofs].id = id; 758 state->refs[new_ofs].insn_idx = insn_idx; 759 760 return id; 761 } 762 763 /* release function corresponding to acquire_reference_state(). Idempotent. */ 764 static int release_reference_state(struct bpf_func_state *state, int ptr_id) 765 { 766 int i, last_idx; 767 768 last_idx = state->acquired_refs - 1; 769 for (i = 0; i < state->acquired_refs; i++) { 770 if (state->refs[i].id == ptr_id) { 771 if (last_idx && i != last_idx) 772 memcpy(&state->refs[i], &state->refs[last_idx], 773 sizeof(*state->refs)); 774 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); 775 state->acquired_refs--; 776 return 0; 777 } 778 } 779 return -EINVAL; 780 } 781 782 static int transfer_reference_state(struct bpf_func_state *dst, 783 struct bpf_func_state *src) 784 { 785 int err = realloc_reference_state(dst, src->acquired_refs, false); 786 if (err) 787 return err; 788 err = copy_reference_state(dst, src); 789 if (err) 790 return err; 791 return 0; 792 } 793 794 static void free_func_state(struct bpf_func_state *state) 795 { 796 if (!state) 797 return; 798 kfree(state->refs); 799 kfree(state->stack); 800 kfree(state); 801 } 802 803 static void clear_jmp_history(struct bpf_verifier_state *state) 804 { 805 kfree(state->jmp_history); 806 state->jmp_history = NULL; 807 state->jmp_history_cnt = 0; 808 } 809 810 static void free_verifier_state(struct bpf_verifier_state *state, 811 bool free_self) 812 { 813 int i; 814 815 for (i = 0; i <= state->curframe; i++) { 816 free_func_state(state->frame[i]); 817 state->frame[i] = NULL; 818 } 819 clear_jmp_history(state); 820 if (free_self) 821 kfree(state); 822 } 823 824 /* copy verifier state from src to dst growing dst stack space 825 * when necessary to accommodate larger src stack 826 */ 827 static int copy_func_state(struct bpf_func_state *dst, 828 const struct bpf_func_state *src) 829 { 830 int err; 831 832 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, 833 false); 834 if (err) 835 return err; 836 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); 837 err = copy_reference_state(dst, src); 838 if (err) 839 return err; 840 return copy_stack_state(dst, src); 841 } 842 843 static int copy_verifier_state(struct bpf_verifier_state *dst_state, 844 const struct bpf_verifier_state *src) 845 { 846 struct bpf_func_state *dst; 847 u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt; 848 int i, err; 849 850 if (dst_state->jmp_history_cnt < src->jmp_history_cnt) { 851 kfree(dst_state->jmp_history); 852 dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER); 853 if (!dst_state->jmp_history) 854 return -ENOMEM; 855 } 856 memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz); 857 dst_state->jmp_history_cnt = src->jmp_history_cnt; 858 859 /* if dst has more stack frames then src frame, free them */ 860 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { 861 free_func_state(dst_state->frame[i]); 862 dst_state->frame[i] = NULL; 863 } 864 dst_state->speculative = src->speculative; 865 dst_state->curframe = src->curframe; 866 dst_state->active_spin_lock = src->active_spin_lock; 867 dst_state->branches = src->branches; 868 dst_state->parent = src->parent; 869 dst_state->first_insn_idx = src->first_insn_idx; 870 dst_state->last_insn_idx = src->last_insn_idx; 871 for (i = 0; i <= src->curframe; i++) { 872 dst = dst_state->frame[i]; 873 if (!dst) { 874 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 875 if (!dst) 876 return -ENOMEM; 877 dst_state->frame[i] = dst; 878 } 879 err = copy_func_state(dst, src->frame[i]); 880 if (err) 881 return err; 882 } 883 return 0; 884 } 885 886 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 887 { 888 while (st) { 889 u32 br = --st->branches; 890 891 /* WARN_ON(br > 1) technically makes sense here, 892 * but see comment in push_stack(), hence: 893 */ 894 WARN_ONCE((int)br < 0, 895 "BUG update_branch_counts:branches_to_explore=%d\n", 896 br); 897 if (br) 898 break; 899 st = st->parent; 900 } 901 } 902 903 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 904 int *insn_idx, bool pop_log) 905 { 906 struct bpf_verifier_state *cur = env->cur_state; 907 struct bpf_verifier_stack_elem *elem, *head = env->head; 908 int err; 909 910 if (env->head == NULL) 911 return -ENOENT; 912 913 if (cur) { 914 err = copy_verifier_state(cur, &head->st); 915 if (err) 916 return err; 917 } 918 if (pop_log) 919 bpf_vlog_reset(&env->log, head->log_pos); 920 if (insn_idx) 921 *insn_idx = head->insn_idx; 922 if (prev_insn_idx) 923 *prev_insn_idx = head->prev_insn_idx; 924 elem = head->next; 925 free_verifier_state(&head->st, false); 926 kfree(head); 927 env->head = elem; 928 env->stack_size--; 929 return 0; 930 } 931 932 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 933 int insn_idx, int prev_insn_idx, 934 bool speculative) 935 { 936 struct bpf_verifier_state *cur = env->cur_state; 937 struct bpf_verifier_stack_elem *elem; 938 int err; 939 940 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 941 if (!elem) 942 goto err; 943 944 elem->insn_idx = insn_idx; 945 elem->prev_insn_idx = prev_insn_idx; 946 elem->next = env->head; 947 elem->log_pos = env->log.len_used; 948 env->head = elem; 949 env->stack_size++; 950 err = copy_verifier_state(&elem->st, cur); 951 if (err) 952 goto err; 953 elem->st.speculative |= speculative; 954 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 955 verbose(env, "The sequence of %d jumps is too complex.\n", 956 env->stack_size); 957 goto err; 958 } 959 if (elem->st.parent) { 960 ++elem->st.parent->branches; 961 /* WARN_ON(branches > 2) technically makes sense here, 962 * but 963 * 1. speculative states will bump 'branches' for non-branch 964 * instructions 965 * 2. is_state_visited() heuristics may decide not to create 966 * a new state for a sequence of branches and all such current 967 * and cloned states will be pointing to a single parent state 968 * which might have large 'branches' count. 969 */ 970 } 971 return &elem->st; 972 err: 973 free_verifier_state(env->cur_state, true); 974 env->cur_state = NULL; 975 /* pop all elements and return */ 976 while (!pop_stack(env, NULL, NULL, false)); 977 return NULL; 978 } 979 980 #define CALLER_SAVED_REGS 6 981 static const int caller_saved[CALLER_SAVED_REGS] = { 982 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 983 }; 984 985 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 986 struct bpf_reg_state *reg); 987 988 /* Mark the unknown part of a register (variable offset or scalar value) as 989 * known to have the value @imm. 990 */ 991 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 992 { 993 /* Clear id, off, and union(map_ptr, range) */ 994 memset(((u8 *)reg) + sizeof(reg->type), 0, 995 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); 996 reg->var_off = tnum_const(imm); 997 reg->smin_value = (s64)imm; 998 reg->smax_value = (s64)imm; 999 reg->umin_value = imm; 1000 reg->umax_value = imm; 1001 1002 reg->s32_min_value = (s32)imm; 1003 reg->s32_max_value = (s32)imm; 1004 reg->u32_min_value = (u32)imm; 1005 reg->u32_max_value = (u32)imm; 1006 } 1007 1008 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) 1009 { 1010 reg->var_off = tnum_const_subreg(reg->var_off, imm); 1011 reg->s32_min_value = (s32)imm; 1012 reg->s32_max_value = (s32)imm; 1013 reg->u32_min_value = (u32)imm; 1014 reg->u32_max_value = (u32)imm; 1015 } 1016 1017 /* Mark the 'variable offset' part of a register as zero. This should be 1018 * used only on registers holding a pointer type. 1019 */ 1020 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 1021 { 1022 __mark_reg_known(reg, 0); 1023 } 1024 1025 static void __mark_reg_const_zero(struct bpf_reg_state *reg) 1026 { 1027 __mark_reg_known(reg, 0); 1028 reg->type = SCALAR_VALUE; 1029 } 1030 1031 static void mark_reg_known_zero(struct bpf_verifier_env *env, 1032 struct bpf_reg_state *regs, u32 regno) 1033 { 1034 if (WARN_ON(regno >= MAX_BPF_REG)) { 1035 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 1036 /* Something bad happened, let's kill all regs */ 1037 for (regno = 0; regno < MAX_BPF_REG; regno++) 1038 __mark_reg_not_init(env, regs + regno); 1039 return; 1040 } 1041 __mark_reg_known_zero(regs + regno); 1042 } 1043 1044 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 1045 { 1046 return type_is_pkt_pointer(reg->type); 1047 } 1048 1049 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 1050 { 1051 return reg_is_pkt_pointer(reg) || 1052 reg->type == PTR_TO_PACKET_END; 1053 } 1054 1055 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 1056 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 1057 enum bpf_reg_type which) 1058 { 1059 /* The register can already have a range from prior markings. 1060 * This is fine as long as it hasn't been advanced from its 1061 * origin. 1062 */ 1063 return reg->type == which && 1064 reg->id == 0 && 1065 reg->off == 0 && 1066 tnum_equals_const(reg->var_off, 0); 1067 } 1068 1069 /* Reset the min/max bounds of a register */ 1070 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 1071 { 1072 reg->smin_value = S64_MIN; 1073 reg->smax_value = S64_MAX; 1074 reg->umin_value = 0; 1075 reg->umax_value = U64_MAX; 1076 1077 reg->s32_min_value = S32_MIN; 1078 reg->s32_max_value = S32_MAX; 1079 reg->u32_min_value = 0; 1080 reg->u32_max_value = U32_MAX; 1081 } 1082 1083 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 1084 { 1085 reg->smin_value = S64_MIN; 1086 reg->smax_value = S64_MAX; 1087 reg->umin_value = 0; 1088 reg->umax_value = U64_MAX; 1089 } 1090 1091 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 1092 { 1093 reg->s32_min_value = S32_MIN; 1094 reg->s32_max_value = S32_MAX; 1095 reg->u32_min_value = 0; 1096 reg->u32_max_value = U32_MAX; 1097 } 1098 1099 static void __update_reg32_bounds(struct bpf_reg_state *reg) 1100 { 1101 struct tnum var32_off = tnum_subreg(reg->var_off); 1102 1103 /* min signed is max(sign bit) | min(other bits) */ 1104 reg->s32_min_value = max_t(s32, reg->s32_min_value, 1105 var32_off.value | (var32_off.mask & S32_MIN)); 1106 /* max signed is min(sign bit) | max(other bits) */ 1107 reg->s32_max_value = min_t(s32, reg->s32_max_value, 1108 var32_off.value | (var32_off.mask & S32_MAX)); 1109 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 1110 reg->u32_max_value = min(reg->u32_max_value, 1111 (u32)(var32_off.value | var32_off.mask)); 1112 } 1113 1114 static void __update_reg64_bounds(struct bpf_reg_state *reg) 1115 { 1116 /* min signed is max(sign bit) | min(other bits) */ 1117 reg->smin_value = max_t(s64, reg->smin_value, 1118 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 1119 /* max signed is min(sign bit) | max(other bits) */ 1120 reg->smax_value = min_t(s64, reg->smax_value, 1121 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 1122 reg->umin_value = max(reg->umin_value, reg->var_off.value); 1123 reg->umax_value = min(reg->umax_value, 1124 reg->var_off.value | reg->var_off.mask); 1125 } 1126 1127 static void __update_reg_bounds(struct bpf_reg_state *reg) 1128 { 1129 __update_reg32_bounds(reg); 1130 __update_reg64_bounds(reg); 1131 } 1132 1133 /* Uses signed min/max values to inform unsigned, and vice-versa */ 1134 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 1135 { 1136 /* Learn sign from signed bounds. 1137 * If we cannot cross the sign boundary, then signed and unsigned bounds 1138 * are the same, so combine. This works even in the negative case, e.g. 1139 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 1140 */ 1141 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { 1142 reg->s32_min_value = reg->u32_min_value = 1143 max_t(u32, reg->s32_min_value, reg->u32_min_value); 1144 reg->s32_max_value = reg->u32_max_value = 1145 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1146 return; 1147 } 1148 /* Learn sign from unsigned bounds. Signed bounds cross the sign 1149 * boundary, so we must be careful. 1150 */ 1151 if ((s32)reg->u32_max_value >= 0) { 1152 /* Positive. We can't learn anything from the smin, but smax 1153 * is positive, hence safe. 1154 */ 1155 reg->s32_min_value = reg->u32_min_value; 1156 reg->s32_max_value = reg->u32_max_value = 1157 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1158 } else if ((s32)reg->u32_min_value < 0) { 1159 /* Negative. We can't learn anything from the smax, but smin 1160 * is negative, hence safe. 1161 */ 1162 reg->s32_min_value = reg->u32_min_value = 1163 max_t(u32, reg->s32_min_value, reg->u32_min_value); 1164 reg->s32_max_value = reg->u32_max_value; 1165 } 1166 } 1167 1168 static void __reg64_deduce_bounds(struct bpf_reg_state *reg) 1169 { 1170 /* Learn sign from signed bounds. 1171 * If we cannot cross the sign boundary, then signed and unsigned bounds 1172 * are the same, so combine. This works even in the negative case, e.g. 1173 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 1174 */ 1175 if (reg->smin_value >= 0 || reg->smax_value < 0) { 1176 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 1177 reg->umin_value); 1178 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 1179 reg->umax_value); 1180 return; 1181 } 1182 /* Learn sign from unsigned bounds. Signed bounds cross the sign 1183 * boundary, so we must be careful. 1184 */ 1185 if ((s64)reg->umax_value >= 0) { 1186 /* Positive. We can't learn anything from the smin, but smax 1187 * is positive, hence safe. 1188 */ 1189 reg->smin_value = reg->umin_value; 1190 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 1191 reg->umax_value); 1192 } else if ((s64)reg->umin_value < 0) { 1193 /* Negative. We can't learn anything from the smax, but smin 1194 * is negative, hence safe. 1195 */ 1196 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 1197 reg->umin_value); 1198 reg->smax_value = reg->umax_value; 1199 } 1200 } 1201 1202 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 1203 { 1204 __reg32_deduce_bounds(reg); 1205 __reg64_deduce_bounds(reg); 1206 } 1207 1208 /* Attempts to improve var_off based on unsigned min/max information */ 1209 static void __reg_bound_offset(struct bpf_reg_state *reg) 1210 { 1211 struct tnum var64_off = tnum_intersect(reg->var_off, 1212 tnum_range(reg->umin_value, 1213 reg->umax_value)); 1214 struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off), 1215 tnum_range(reg->u32_min_value, 1216 reg->u32_max_value)); 1217 1218 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); 1219 } 1220 1221 static void __reg_assign_32_into_64(struct bpf_reg_state *reg) 1222 { 1223 reg->umin_value = reg->u32_min_value; 1224 reg->umax_value = reg->u32_max_value; 1225 /* Attempt to pull 32-bit signed bounds into 64-bit bounds 1226 * but must be positive otherwise set to worse case bounds 1227 * and refine later from tnum. 1228 */ 1229 if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0) 1230 reg->smax_value = reg->s32_max_value; 1231 else 1232 reg->smax_value = U32_MAX; 1233 if (reg->s32_min_value >= 0) 1234 reg->smin_value = reg->s32_min_value; 1235 else 1236 reg->smin_value = 0; 1237 } 1238 1239 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) 1240 { 1241 /* special case when 64-bit register has upper 32-bit register 1242 * zeroed. Typically happens after zext or <<32, >>32 sequence 1243 * allowing us to use 32-bit bounds directly, 1244 */ 1245 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) { 1246 __reg_assign_32_into_64(reg); 1247 } else { 1248 /* Otherwise the best we can do is push lower 32bit known and 1249 * unknown bits into register (var_off set from jmp logic) 1250 * then learn as much as possible from the 64-bit tnum 1251 * known and unknown bits. The previous smin/smax bounds are 1252 * invalid here because of jmp32 compare so mark them unknown 1253 * so they do not impact tnum bounds calculation. 1254 */ 1255 __mark_reg64_unbounded(reg); 1256 __update_reg_bounds(reg); 1257 } 1258 1259 /* Intersecting with the old var_off might have improved our bounds 1260 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1261 * then new var_off is (0; 0x7f...fc) which improves our umax. 1262 */ 1263 __reg_deduce_bounds(reg); 1264 __reg_bound_offset(reg); 1265 __update_reg_bounds(reg); 1266 } 1267 1268 static bool __reg64_bound_s32(s64 a) 1269 { 1270 if (a > S32_MIN && a < S32_MAX) 1271 return true; 1272 return false; 1273 } 1274 1275 static bool __reg64_bound_u32(u64 a) 1276 { 1277 if (a > U32_MIN && a < U32_MAX) 1278 return true; 1279 return false; 1280 } 1281 1282 static void __reg_combine_64_into_32(struct bpf_reg_state *reg) 1283 { 1284 __mark_reg32_unbounded(reg); 1285 1286 if (__reg64_bound_s32(reg->smin_value)) 1287 reg->s32_min_value = (s32)reg->smin_value; 1288 if (__reg64_bound_s32(reg->smax_value)) 1289 reg->s32_max_value = (s32)reg->smax_value; 1290 if (__reg64_bound_u32(reg->umin_value)) 1291 reg->u32_min_value = (u32)reg->umin_value; 1292 if (__reg64_bound_u32(reg->umax_value)) 1293 reg->u32_max_value = (u32)reg->umax_value; 1294 1295 /* Intersecting with the old var_off might have improved our bounds 1296 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1297 * then new var_off is (0; 0x7f...fc) which improves our umax. 1298 */ 1299 __reg_deduce_bounds(reg); 1300 __reg_bound_offset(reg); 1301 __update_reg_bounds(reg); 1302 } 1303 1304 /* Mark a register as having a completely unknown (scalar) value. */ 1305 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1306 struct bpf_reg_state *reg) 1307 { 1308 /* 1309 * Clear type, id, off, and union(map_ptr, range) and 1310 * padding between 'type' and union 1311 */ 1312 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 1313 reg->type = SCALAR_VALUE; 1314 reg->var_off = tnum_unknown; 1315 reg->frameno = 0; 1316 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; 1317 __mark_reg_unbounded(reg); 1318 } 1319 1320 static void mark_reg_unknown(struct bpf_verifier_env *env, 1321 struct bpf_reg_state *regs, u32 regno) 1322 { 1323 if (WARN_ON(regno >= MAX_BPF_REG)) { 1324 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 1325 /* Something bad happened, let's kill all regs except FP */ 1326 for (regno = 0; regno < BPF_REG_FP; regno++) 1327 __mark_reg_not_init(env, regs + regno); 1328 return; 1329 } 1330 __mark_reg_unknown(env, regs + regno); 1331 } 1332 1333 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 1334 struct bpf_reg_state *reg) 1335 { 1336 __mark_reg_unknown(env, reg); 1337 reg->type = NOT_INIT; 1338 } 1339 1340 static void mark_reg_not_init(struct bpf_verifier_env *env, 1341 struct bpf_reg_state *regs, u32 regno) 1342 { 1343 if (WARN_ON(regno >= MAX_BPF_REG)) { 1344 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 1345 /* Something bad happened, let's kill all regs except FP */ 1346 for (regno = 0; regno < BPF_REG_FP; regno++) 1347 __mark_reg_not_init(env, regs + regno); 1348 return; 1349 } 1350 __mark_reg_not_init(env, regs + regno); 1351 } 1352 1353 static void mark_btf_ld_reg(struct bpf_verifier_env *env, 1354 struct bpf_reg_state *regs, u32 regno, 1355 enum bpf_reg_type reg_type, u32 btf_id) 1356 { 1357 if (reg_type == SCALAR_VALUE) { 1358 mark_reg_unknown(env, regs, regno); 1359 return; 1360 } 1361 mark_reg_known_zero(env, regs, regno); 1362 regs[regno].type = PTR_TO_BTF_ID; 1363 regs[regno].btf_id = btf_id; 1364 } 1365 1366 #define DEF_NOT_SUBREG (0) 1367 static void init_reg_state(struct bpf_verifier_env *env, 1368 struct bpf_func_state *state) 1369 { 1370 struct bpf_reg_state *regs = state->regs; 1371 int i; 1372 1373 for (i = 0; i < MAX_BPF_REG; i++) { 1374 mark_reg_not_init(env, regs, i); 1375 regs[i].live = REG_LIVE_NONE; 1376 regs[i].parent = NULL; 1377 regs[i].subreg_def = DEF_NOT_SUBREG; 1378 } 1379 1380 /* frame pointer */ 1381 regs[BPF_REG_FP].type = PTR_TO_STACK; 1382 mark_reg_known_zero(env, regs, BPF_REG_FP); 1383 regs[BPF_REG_FP].frameno = state->frameno; 1384 } 1385 1386 #define BPF_MAIN_FUNC (-1) 1387 static void init_func_state(struct bpf_verifier_env *env, 1388 struct bpf_func_state *state, 1389 int callsite, int frameno, int subprogno) 1390 { 1391 state->callsite = callsite; 1392 state->frameno = frameno; 1393 state->subprogno = subprogno; 1394 init_reg_state(env, state); 1395 } 1396 1397 enum reg_arg_type { 1398 SRC_OP, /* register is used as source operand */ 1399 DST_OP, /* register is used as destination operand */ 1400 DST_OP_NO_MARK /* same as above, check only, don't mark */ 1401 }; 1402 1403 static int cmp_subprogs(const void *a, const void *b) 1404 { 1405 return ((struct bpf_subprog_info *)a)->start - 1406 ((struct bpf_subprog_info *)b)->start; 1407 } 1408 1409 static int find_subprog(struct bpf_verifier_env *env, int off) 1410 { 1411 struct bpf_subprog_info *p; 1412 1413 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 1414 sizeof(env->subprog_info[0]), cmp_subprogs); 1415 if (!p) 1416 return -ENOENT; 1417 return p - env->subprog_info; 1418 1419 } 1420 1421 static int add_subprog(struct bpf_verifier_env *env, int off) 1422 { 1423 int insn_cnt = env->prog->len; 1424 int ret; 1425 1426 if (off >= insn_cnt || off < 0) { 1427 verbose(env, "call to invalid destination\n"); 1428 return -EINVAL; 1429 } 1430 ret = find_subprog(env, off); 1431 if (ret >= 0) 1432 return 0; 1433 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 1434 verbose(env, "too many subprograms\n"); 1435 return -E2BIG; 1436 } 1437 env->subprog_info[env->subprog_cnt++].start = off; 1438 sort(env->subprog_info, env->subprog_cnt, 1439 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 1440 return 0; 1441 } 1442 1443 static int check_subprogs(struct bpf_verifier_env *env) 1444 { 1445 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; 1446 struct bpf_subprog_info *subprog = env->subprog_info; 1447 struct bpf_insn *insn = env->prog->insnsi; 1448 int insn_cnt = env->prog->len; 1449 1450 /* Add entry function. */ 1451 ret = add_subprog(env, 0); 1452 if (ret < 0) 1453 return ret; 1454 1455 /* determine subprog starts. The end is one before the next starts */ 1456 for (i = 0; i < insn_cnt; i++) { 1457 if (insn[i].code != (BPF_JMP | BPF_CALL)) 1458 continue; 1459 if (insn[i].src_reg != BPF_PSEUDO_CALL) 1460 continue; 1461 if (!env->bpf_capable) { 1462 verbose(env, 1463 "function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); 1464 return -EPERM; 1465 } 1466 ret = add_subprog(env, i + insn[i].imm + 1); 1467 if (ret < 0) 1468 return ret; 1469 } 1470 1471 /* Add a fake 'exit' subprog which could simplify subprog iteration 1472 * logic. 'subprog_cnt' should not be increased. 1473 */ 1474 subprog[env->subprog_cnt].start = insn_cnt; 1475 1476 if (env->log.level & BPF_LOG_LEVEL2) 1477 for (i = 0; i < env->subprog_cnt; i++) 1478 verbose(env, "func#%d @%d\n", i, subprog[i].start); 1479 1480 /* now check that all jumps are within the same subprog */ 1481 subprog_start = subprog[cur_subprog].start; 1482 subprog_end = subprog[cur_subprog + 1].start; 1483 for (i = 0; i < insn_cnt; i++) { 1484 u8 code = insn[i].code; 1485 1486 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 1487 goto next; 1488 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 1489 goto next; 1490 off = i + insn[i].off + 1; 1491 if (off < subprog_start || off >= subprog_end) { 1492 verbose(env, "jump out of range from insn %d to %d\n", i, off); 1493 return -EINVAL; 1494 } 1495 next: 1496 if (i == subprog_end - 1) { 1497 /* to avoid fall-through from one subprog into another 1498 * the last insn of the subprog should be either exit 1499 * or unconditional jump back 1500 */ 1501 if (code != (BPF_JMP | BPF_EXIT) && 1502 code != (BPF_JMP | BPF_JA)) { 1503 verbose(env, "last insn is not an exit or jmp\n"); 1504 return -EINVAL; 1505 } 1506 subprog_start = subprog_end; 1507 cur_subprog++; 1508 if (cur_subprog < env->subprog_cnt) 1509 subprog_end = subprog[cur_subprog + 1].start; 1510 } 1511 } 1512 return 0; 1513 } 1514 1515 /* Parentage chain of this register (or stack slot) should take care of all 1516 * issues like callee-saved registers, stack slot allocation time, etc. 1517 */ 1518 static int mark_reg_read(struct bpf_verifier_env *env, 1519 const struct bpf_reg_state *state, 1520 struct bpf_reg_state *parent, u8 flag) 1521 { 1522 bool writes = parent == state->parent; /* Observe write marks */ 1523 int cnt = 0; 1524 1525 while (parent) { 1526 /* if read wasn't screened by an earlier write ... */ 1527 if (writes && state->live & REG_LIVE_WRITTEN) 1528 break; 1529 if (parent->live & REG_LIVE_DONE) { 1530 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 1531 reg_type_str[parent->type], 1532 parent->var_off.value, parent->off); 1533 return -EFAULT; 1534 } 1535 /* The first condition is more likely to be true than the 1536 * second, checked it first. 1537 */ 1538 if ((parent->live & REG_LIVE_READ) == flag || 1539 parent->live & REG_LIVE_READ64) 1540 /* The parentage chain never changes and 1541 * this parent was already marked as LIVE_READ. 1542 * There is no need to keep walking the chain again and 1543 * keep re-marking all parents as LIVE_READ. 1544 * This case happens when the same register is read 1545 * multiple times without writes into it in-between. 1546 * Also, if parent has the stronger REG_LIVE_READ64 set, 1547 * then no need to set the weak REG_LIVE_READ32. 1548 */ 1549 break; 1550 /* ... then we depend on parent's value */ 1551 parent->live |= flag; 1552 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 1553 if (flag == REG_LIVE_READ64) 1554 parent->live &= ~REG_LIVE_READ32; 1555 state = parent; 1556 parent = state->parent; 1557 writes = true; 1558 cnt++; 1559 } 1560 1561 if (env->longest_mark_read_walk < cnt) 1562 env->longest_mark_read_walk = cnt; 1563 return 0; 1564 } 1565 1566 /* This function is supposed to be used by the following 32-bit optimization 1567 * code only. It returns TRUE if the source or destination register operates 1568 * on 64-bit, otherwise return FALSE. 1569 */ 1570 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 1571 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 1572 { 1573 u8 code, class, op; 1574 1575 code = insn->code; 1576 class = BPF_CLASS(code); 1577 op = BPF_OP(code); 1578 if (class == BPF_JMP) { 1579 /* BPF_EXIT for "main" will reach here. Return TRUE 1580 * conservatively. 1581 */ 1582 if (op == BPF_EXIT) 1583 return true; 1584 if (op == BPF_CALL) { 1585 /* BPF to BPF call will reach here because of marking 1586 * caller saved clobber with DST_OP_NO_MARK for which we 1587 * don't care the register def because they are anyway 1588 * marked as NOT_INIT already. 1589 */ 1590 if (insn->src_reg == BPF_PSEUDO_CALL) 1591 return false; 1592 /* Helper call will reach here because of arg type 1593 * check, conservatively return TRUE. 1594 */ 1595 if (t == SRC_OP) 1596 return true; 1597 1598 return false; 1599 } 1600 } 1601 1602 if (class == BPF_ALU64 || class == BPF_JMP || 1603 /* BPF_END always use BPF_ALU class. */ 1604 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 1605 return true; 1606 1607 if (class == BPF_ALU || class == BPF_JMP32) 1608 return false; 1609 1610 if (class == BPF_LDX) { 1611 if (t != SRC_OP) 1612 return BPF_SIZE(code) == BPF_DW; 1613 /* LDX source must be ptr. */ 1614 return true; 1615 } 1616 1617 if (class == BPF_STX) { 1618 if (reg->type != SCALAR_VALUE) 1619 return true; 1620 return BPF_SIZE(code) == BPF_DW; 1621 } 1622 1623 if (class == BPF_LD) { 1624 u8 mode = BPF_MODE(code); 1625 1626 /* LD_IMM64 */ 1627 if (mode == BPF_IMM) 1628 return true; 1629 1630 /* Both LD_IND and LD_ABS return 32-bit data. */ 1631 if (t != SRC_OP) 1632 return false; 1633 1634 /* Implicit ctx ptr. */ 1635 if (regno == BPF_REG_6) 1636 return true; 1637 1638 /* Explicit source could be any width. */ 1639 return true; 1640 } 1641 1642 if (class == BPF_ST) 1643 /* The only source register for BPF_ST is a ptr. */ 1644 return true; 1645 1646 /* Conservatively return true at default. */ 1647 return true; 1648 } 1649 1650 /* Return TRUE if INSN doesn't have explicit value define. */ 1651 static bool insn_no_def(struct bpf_insn *insn) 1652 { 1653 u8 class = BPF_CLASS(insn->code); 1654 1655 return (class == BPF_JMP || class == BPF_JMP32 || 1656 class == BPF_STX || class == BPF_ST); 1657 } 1658 1659 /* Return TRUE if INSN has defined any 32-bit value explicitly. */ 1660 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) 1661 { 1662 if (insn_no_def(insn)) 1663 return false; 1664 1665 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP); 1666 } 1667 1668 static void mark_insn_zext(struct bpf_verifier_env *env, 1669 struct bpf_reg_state *reg) 1670 { 1671 s32 def_idx = reg->subreg_def; 1672 1673 if (def_idx == DEF_NOT_SUBREG) 1674 return; 1675 1676 env->insn_aux_data[def_idx - 1].zext_dst = true; 1677 /* The dst will be zero extended, so won't be sub-register anymore. */ 1678 reg->subreg_def = DEF_NOT_SUBREG; 1679 } 1680 1681 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 1682 enum reg_arg_type t) 1683 { 1684 struct bpf_verifier_state *vstate = env->cur_state; 1685 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 1686 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; 1687 struct bpf_reg_state *reg, *regs = state->regs; 1688 bool rw64; 1689 1690 if (regno >= MAX_BPF_REG) { 1691 verbose(env, "R%d is invalid\n", regno); 1692 return -EINVAL; 1693 } 1694 1695 reg = ®s[regno]; 1696 rw64 = is_reg64(env, insn, regno, reg, t); 1697 if (t == SRC_OP) { 1698 /* check whether register used as source operand can be read */ 1699 if (reg->type == NOT_INIT) { 1700 verbose(env, "R%d !read_ok\n", regno); 1701 return -EACCES; 1702 } 1703 /* We don't need to worry about FP liveness because it's read-only */ 1704 if (regno == BPF_REG_FP) 1705 return 0; 1706 1707 if (rw64) 1708 mark_insn_zext(env, reg); 1709 1710 return mark_reg_read(env, reg, reg->parent, 1711 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); 1712 } else { 1713 /* check whether register used as dest operand can be written to */ 1714 if (regno == BPF_REG_FP) { 1715 verbose(env, "frame pointer is read only\n"); 1716 return -EACCES; 1717 } 1718 reg->live |= REG_LIVE_WRITTEN; 1719 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; 1720 if (t == DST_OP) 1721 mark_reg_unknown(env, regs, regno); 1722 } 1723 return 0; 1724 } 1725 1726 /* for any branch, call, exit record the history of jmps in the given state */ 1727 static int push_jmp_history(struct bpf_verifier_env *env, 1728 struct bpf_verifier_state *cur) 1729 { 1730 u32 cnt = cur->jmp_history_cnt; 1731 struct bpf_idx_pair *p; 1732 1733 cnt++; 1734 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER); 1735 if (!p) 1736 return -ENOMEM; 1737 p[cnt - 1].idx = env->insn_idx; 1738 p[cnt - 1].prev_idx = env->prev_insn_idx; 1739 cur->jmp_history = p; 1740 cur->jmp_history_cnt = cnt; 1741 return 0; 1742 } 1743 1744 /* Backtrack one insn at a time. If idx is not at the top of recorded 1745 * history then previous instruction came from straight line execution. 1746 */ 1747 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, 1748 u32 *history) 1749 { 1750 u32 cnt = *history; 1751 1752 if (cnt && st->jmp_history[cnt - 1].idx == i) { 1753 i = st->jmp_history[cnt - 1].prev_idx; 1754 (*history)--; 1755 } else { 1756 i--; 1757 } 1758 return i; 1759 } 1760 1761 /* For given verifier state backtrack_insn() is called from the last insn to 1762 * the first insn. Its purpose is to compute a bitmask of registers and 1763 * stack slots that needs precision in the parent verifier state. 1764 */ 1765 static int backtrack_insn(struct bpf_verifier_env *env, int idx, 1766 u32 *reg_mask, u64 *stack_mask) 1767 { 1768 const struct bpf_insn_cbs cbs = { 1769 .cb_print = verbose, 1770 .private_data = env, 1771 }; 1772 struct bpf_insn *insn = env->prog->insnsi + idx; 1773 u8 class = BPF_CLASS(insn->code); 1774 u8 opcode = BPF_OP(insn->code); 1775 u8 mode = BPF_MODE(insn->code); 1776 u32 dreg = 1u << insn->dst_reg; 1777 u32 sreg = 1u << insn->src_reg; 1778 u32 spi; 1779 1780 if (insn->code == 0) 1781 return 0; 1782 if (env->log.level & BPF_LOG_LEVEL) { 1783 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); 1784 verbose(env, "%d: ", idx); 1785 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 1786 } 1787 1788 if (class == BPF_ALU || class == BPF_ALU64) { 1789 if (!(*reg_mask & dreg)) 1790 return 0; 1791 if (opcode == BPF_MOV) { 1792 if (BPF_SRC(insn->code) == BPF_X) { 1793 /* dreg = sreg 1794 * dreg needs precision after this insn 1795 * sreg needs precision before this insn 1796 */ 1797 *reg_mask &= ~dreg; 1798 *reg_mask |= sreg; 1799 } else { 1800 /* dreg = K 1801 * dreg needs precision after this insn. 1802 * Corresponding register is already marked 1803 * as precise=true in this verifier state. 1804 * No further markings in parent are necessary 1805 */ 1806 *reg_mask &= ~dreg; 1807 } 1808 } else { 1809 if (BPF_SRC(insn->code) == BPF_X) { 1810 /* dreg += sreg 1811 * both dreg and sreg need precision 1812 * before this insn 1813 */ 1814 *reg_mask |= sreg; 1815 } /* else dreg += K 1816 * dreg still needs precision before this insn 1817 */ 1818 } 1819 } else if (class == BPF_LDX) { 1820 if (!(*reg_mask & dreg)) 1821 return 0; 1822 *reg_mask &= ~dreg; 1823 1824 /* scalars can only be spilled into stack w/o losing precision. 1825 * Load from any other memory can be zero extended. 1826 * The desire to keep that precision is already indicated 1827 * by 'precise' mark in corresponding register of this state. 1828 * No further tracking necessary. 1829 */ 1830 if (insn->src_reg != BPF_REG_FP) 1831 return 0; 1832 if (BPF_SIZE(insn->code) != BPF_DW) 1833 return 0; 1834 1835 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 1836 * that [fp - off] slot contains scalar that needs to be 1837 * tracked with precision 1838 */ 1839 spi = (-insn->off - 1) / BPF_REG_SIZE; 1840 if (spi >= 64) { 1841 verbose(env, "BUG spi %d\n", spi); 1842 WARN_ONCE(1, "verifier backtracking bug"); 1843 return -EFAULT; 1844 } 1845 *stack_mask |= 1ull << spi; 1846 } else if (class == BPF_STX || class == BPF_ST) { 1847 if (*reg_mask & dreg) 1848 /* stx & st shouldn't be using _scalar_ dst_reg 1849 * to access memory. It means backtracking 1850 * encountered a case of pointer subtraction. 1851 */ 1852 return -ENOTSUPP; 1853 /* scalars can only be spilled into stack */ 1854 if (insn->dst_reg != BPF_REG_FP) 1855 return 0; 1856 if (BPF_SIZE(insn->code) != BPF_DW) 1857 return 0; 1858 spi = (-insn->off - 1) / BPF_REG_SIZE; 1859 if (spi >= 64) { 1860 verbose(env, "BUG spi %d\n", spi); 1861 WARN_ONCE(1, "verifier backtracking bug"); 1862 return -EFAULT; 1863 } 1864 if (!(*stack_mask & (1ull << spi))) 1865 return 0; 1866 *stack_mask &= ~(1ull << spi); 1867 if (class == BPF_STX) 1868 *reg_mask |= sreg; 1869 } else if (class == BPF_JMP || class == BPF_JMP32) { 1870 if (opcode == BPF_CALL) { 1871 if (insn->src_reg == BPF_PSEUDO_CALL) 1872 return -ENOTSUPP; 1873 /* regular helper call sets R0 */ 1874 *reg_mask &= ~1; 1875 if (*reg_mask & 0x3f) { 1876 /* if backtracing was looking for registers R1-R5 1877 * they should have been found already. 1878 */ 1879 verbose(env, "BUG regs %x\n", *reg_mask); 1880 WARN_ONCE(1, "verifier backtracking bug"); 1881 return -EFAULT; 1882 } 1883 } else if (opcode == BPF_EXIT) { 1884 return -ENOTSUPP; 1885 } 1886 } else if (class == BPF_LD) { 1887 if (!(*reg_mask & dreg)) 1888 return 0; 1889 *reg_mask &= ~dreg; 1890 /* It's ld_imm64 or ld_abs or ld_ind. 1891 * For ld_imm64 no further tracking of precision 1892 * into parent is necessary 1893 */ 1894 if (mode == BPF_IND || mode == BPF_ABS) 1895 /* to be analyzed */ 1896 return -ENOTSUPP; 1897 } 1898 return 0; 1899 } 1900 1901 /* the scalar precision tracking algorithm: 1902 * . at the start all registers have precise=false. 1903 * . scalar ranges are tracked as normal through alu and jmp insns. 1904 * . once precise value of the scalar register is used in: 1905 * . ptr + scalar alu 1906 * . if (scalar cond K|scalar) 1907 * . helper_call(.., scalar, ...) where ARG_CONST is expected 1908 * backtrack through the verifier states and mark all registers and 1909 * stack slots with spilled constants that these scalar regisers 1910 * should be precise. 1911 * . during state pruning two registers (or spilled stack slots) 1912 * are equivalent if both are not precise. 1913 * 1914 * Note the verifier cannot simply walk register parentage chain, 1915 * since many different registers and stack slots could have been 1916 * used to compute single precise scalar. 1917 * 1918 * The approach of starting with precise=true for all registers and then 1919 * backtrack to mark a register as not precise when the verifier detects 1920 * that program doesn't care about specific value (e.g., when helper 1921 * takes register as ARG_ANYTHING parameter) is not safe. 1922 * 1923 * It's ok to walk single parentage chain of the verifier states. 1924 * It's possible that this backtracking will go all the way till 1st insn. 1925 * All other branches will be explored for needing precision later. 1926 * 1927 * The backtracking needs to deal with cases like: 1928 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 1929 * r9 -= r8 1930 * r5 = r9 1931 * if r5 > 0x79f goto pc+7 1932 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 1933 * r5 += 1 1934 * ... 1935 * call bpf_perf_event_output#25 1936 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 1937 * 1938 * and this case: 1939 * r6 = 1 1940 * call foo // uses callee's r6 inside to compute r0 1941 * r0 += r6 1942 * if r0 == 0 goto 1943 * 1944 * to track above reg_mask/stack_mask needs to be independent for each frame. 1945 * 1946 * Also if parent's curframe > frame where backtracking started, 1947 * the verifier need to mark registers in both frames, otherwise callees 1948 * may incorrectly prune callers. This is similar to 1949 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 1950 * 1951 * For now backtracking falls back into conservative marking. 1952 */ 1953 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 1954 struct bpf_verifier_state *st) 1955 { 1956 struct bpf_func_state *func; 1957 struct bpf_reg_state *reg; 1958 int i, j; 1959 1960 /* big hammer: mark all scalars precise in this path. 1961 * pop_stack may still get !precise scalars. 1962 */ 1963 for (; st; st = st->parent) 1964 for (i = 0; i <= st->curframe; i++) { 1965 func = st->frame[i]; 1966 for (j = 0; j < BPF_REG_FP; j++) { 1967 reg = &func->regs[j]; 1968 if (reg->type != SCALAR_VALUE) 1969 continue; 1970 reg->precise = true; 1971 } 1972 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 1973 if (func->stack[j].slot_type[0] != STACK_SPILL) 1974 continue; 1975 reg = &func->stack[j].spilled_ptr; 1976 if (reg->type != SCALAR_VALUE) 1977 continue; 1978 reg->precise = true; 1979 } 1980 } 1981 } 1982 1983 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, 1984 int spi) 1985 { 1986 struct bpf_verifier_state *st = env->cur_state; 1987 int first_idx = st->first_insn_idx; 1988 int last_idx = env->insn_idx; 1989 struct bpf_func_state *func; 1990 struct bpf_reg_state *reg; 1991 u32 reg_mask = regno >= 0 ? 1u << regno : 0; 1992 u64 stack_mask = spi >= 0 ? 1ull << spi : 0; 1993 bool skip_first = true; 1994 bool new_marks = false; 1995 int i, err; 1996 1997 if (!env->bpf_capable) 1998 return 0; 1999 2000 func = st->frame[st->curframe]; 2001 if (regno >= 0) { 2002 reg = &func->regs[regno]; 2003 if (reg->type != SCALAR_VALUE) { 2004 WARN_ONCE(1, "backtracing misuse"); 2005 return -EFAULT; 2006 } 2007 if (!reg->precise) 2008 new_marks = true; 2009 else 2010 reg_mask = 0; 2011 reg->precise = true; 2012 } 2013 2014 while (spi >= 0) { 2015 if (func->stack[spi].slot_type[0] != STACK_SPILL) { 2016 stack_mask = 0; 2017 break; 2018 } 2019 reg = &func->stack[spi].spilled_ptr; 2020 if (reg->type != SCALAR_VALUE) { 2021 stack_mask = 0; 2022 break; 2023 } 2024 if (!reg->precise) 2025 new_marks = true; 2026 else 2027 stack_mask = 0; 2028 reg->precise = true; 2029 break; 2030 } 2031 2032 if (!new_marks) 2033 return 0; 2034 if (!reg_mask && !stack_mask) 2035 return 0; 2036 for (;;) { 2037 DECLARE_BITMAP(mask, 64); 2038 u32 history = st->jmp_history_cnt; 2039 2040 if (env->log.level & BPF_LOG_LEVEL) 2041 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); 2042 for (i = last_idx;;) { 2043 if (skip_first) { 2044 err = 0; 2045 skip_first = false; 2046 } else { 2047 err = backtrack_insn(env, i, ®_mask, &stack_mask); 2048 } 2049 if (err == -ENOTSUPP) { 2050 mark_all_scalars_precise(env, st); 2051 return 0; 2052 } else if (err) { 2053 return err; 2054 } 2055 if (!reg_mask && !stack_mask) 2056 /* Found assignment(s) into tracked register in this state. 2057 * Since this state is already marked, just return. 2058 * Nothing to be tracked further in the parent state. 2059 */ 2060 return 0; 2061 if (i == first_idx) 2062 break; 2063 i = get_prev_insn_idx(st, i, &history); 2064 if (i >= env->prog->len) { 2065 /* This can happen if backtracking reached insn 0 2066 * and there are still reg_mask or stack_mask 2067 * to backtrack. 2068 * It means the backtracking missed the spot where 2069 * particular register was initialized with a constant. 2070 */ 2071 verbose(env, "BUG backtracking idx %d\n", i); 2072 WARN_ONCE(1, "verifier backtracking bug"); 2073 return -EFAULT; 2074 } 2075 } 2076 st = st->parent; 2077 if (!st) 2078 break; 2079 2080 new_marks = false; 2081 func = st->frame[st->curframe]; 2082 bitmap_from_u64(mask, reg_mask); 2083 for_each_set_bit(i, mask, 32) { 2084 reg = &func->regs[i]; 2085 if (reg->type != SCALAR_VALUE) { 2086 reg_mask &= ~(1u << i); 2087 continue; 2088 } 2089 if (!reg->precise) 2090 new_marks = true; 2091 reg->precise = true; 2092 } 2093 2094 bitmap_from_u64(mask, stack_mask); 2095 for_each_set_bit(i, mask, 64) { 2096 if (i >= func->allocated_stack / BPF_REG_SIZE) { 2097 /* the sequence of instructions: 2098 * 2: (bf) r3 = r10 2099 * 3: (7b) *(u64 *)(r3 -8) = r0 2100 * 4: (79) r4 = *(u64 *)(r10 -8) 2101 * doesn't contain jmps. It's backtracked 2102 * as a single block. 2103 * During backtracking insn 3 is not recognized as 2104 * stack access, so at the end of backtracking 2105 * stack slot fp-8 is still marked in stack_mask. 2106 * However the parent state may not have accessed 2107 * fp-8 and it's "unallocated" stack space. 2108 * In such case fallback to conservative. 2109 */ 2110 mark_all_scalars_precise(env, st); 2111 return 0; 2112 } 2113 2114 if (func->stack[i].slot_type[0] != STACK_SPILL) { 2115 stack_mask &= ~(1ull << i); 2116 continue; 2117 } 2118 reg = &func->stack[i].spilled_ptr; 2119 if (reg->type != SCALAR_VALUE) { 2120 stack_mask &= ~(1ull << i); 2121 continue; 2122 } 2123 if (!reg->precise) 2124 new_marks = true; 2125 reg->precise = true; 2126 } 2127 if (env->log.level & BPF_LOG_LEVEL) { 2128 print_verifier_state(env, func); 2129 verbose(env, "parent %s regs=%x stack=%llx marks\n", 2130 new_marks ? "didn't have" : "already had", 2131 reg_mask, stack_mask); 2132 } 2133 2134 if (!reg_mask && !stack_mask) 2135 break; 2136 if (!new_marks) 2137 break; 2138 2139 last_idx = st->last_insn_idx; 2140 first_idx = st->first_insn_idx; 2141 } 2142 return 0; 2143 } 2144 2145 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) 2146 { 2147 return __mark_chain_precision(env, regno, -1); 2148 } 2149 2150 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 2151 { 2152 return __mark_chain_precision(env, -1, spi); 2153 } 2154 2155 static bool is_spillable_regtype(enum bpf_reg_type type) 2156 { 2157 switch (type) { 2158 case PTR_TO_MAP_VALUE: 2159 case PTR_TO_MAP_VALUE_OR_NULL: 2160 case PTR_TO_STACK: 2161 case PTR_TO_CTX: 2162 case PTR_TO_PACKET: 2163 case PTR_TO_PACKET_META: 2164 case PTR_TO_PACKET_END: 2165 case PTR_TO_FLOW_KEYS: 2166 case CONST_PTR_TO_MAP: 2167 case PTR_TO_SOCKET: 2168 case PTR_TO_SOCKET_OR_NULL: 2169 case PTR_TO_SOCK_COMMON: 2170 case PTR_TO_SOCK_COMMON_OR_NULL: 2171 case PTR_TO_TCP_SOCK: 2172 case PTR_TO_TCP_SOCK_OR_NULL: 2173 case PTR_TO_XDP_SOCK: 2174 case PTR_TO_BTF_ID: 2175 case PTR_TO_BTF_ID_OR_NULL: 2176 return true; 2177 default: 2178 return false; 2179 } 2180 } 2181 2182 /* Does this register contain a constant zero? */ 2183 static bool register_is_null(struct bpf_reg_state *reg) 2184 { 2185 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 2186 } 2187 2188 static bool register_is_const(struct bpf_reg_state *reg) 2189 { 2190 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 2191 } 2192 2193 static bool __is_pointer_value(bool allow_ptr_leaks, 2194 const struct bpf_reg_state *reg) 2195 { 2196 if (allow_ptr_leaks) 2197 return false; 2198 2199 return reg->type != SCALAR_VALUE; 2200 } 2201 2202 static void save_register_state(struct bpf_func_state *state, 2203 int spi, struct bpf_reg_state *reg) 2204 { 2205 int i; 2206 2207 state->stack[spi].spilled_ptr = *reg; 2208 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2209 2210 for (i = 0; i < BPF_REG_SIZE; i++) 2211 state->stack[spi].slot_type[i] = STACK_SPILL; 2212 } 2213 2214 /* check_stack_read/write functions track spill/fill of registers, 2215 * stack boundary and alignment are checked in check_mem_access() 2216 */ 2217 static int check_stack_write(struct bpf_verifier_env *env, 2218 struct bpf_func_state *state, /* func where register points to */ 2219 int off, int size, int value_regno, int insn_idx) 2220 { 2221 struct bpf_func_state *cur; /* state of the current function */ 2222 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 2223 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; 2224 struct bpf_reg_state *reg = NULL; 2225 2226 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), 2227 state->acquired_refs, true); 2228 if (err) 2229 return err; 2230 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 2231 * so it's aligned access and [off, off + size) are within stack limits 2232 */ 2233 if (!env->allow_ptr_leaks && 2234 state->stack[spi].slot_type[0] == STACK_SPILL && 2235 size != BPF_REG_SIZE) { 2236 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 2237 return -EACCES; 2238 } 2239 2240 cur = env->cur_state->frame[env->cur_state->curframe]; 2241 if (value_regno >= 0) 2242 reg = &cur->regs[value_regno]; 2243 2244 if (reg && size == BPF_REG_SIZE && register_is_const(reg) && 2245 !register_is_null(reg) && env->bpf_capable) { 2246 if (dst_reg != BPF_REG_FP) { 2247 /* The backtracking logic can only recognize explicit 2248 * stack slot address like [fp - 8]. Other spill of 2249 * scalar via different register has to be conervative. 2250 * Backtrack from here and mark all registers as precise 2251 * that contributed into 'reg' being a constant. 2252 */ 2253 err = mark_chain_precision(env, value_regno); 2254 if (err) 2255 return err; 2256 } 2257 save_register_state(state, spi, reg); 2258 } else if (reg && is_spillable_regtype(reg->type)) { 2259 /* register containing pointer is being spilled into stack */ 2260 if (size != BPF_REG_SIZE) { 2261 verbose_linfo(env, insn_idx, "; "); 2262 verbose(env, "invalid size of register spill\n"); 2263 return -EACCES; 2264 } 2265 2266 if (state != cur && reg->type == PTR_TO_STACK) { 2267 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 2268 return -EINVAL; 2269 } 2270 2271 if (!env->bypass_spec_v4) { 2272 bool sanitize = false; 2273 2274 if (state->stack[spi].slot_type[0] == STACK_SPILL && 2275 register_is_const(&state->stack[spi].spilled_ptr)) 2276 sanitize = true; 2277 for (i = 0; i < BPF_REG_SIZE; i++) 2278 if (state->stack[spi].slot_type[i] == STACK_MISC) { 2279 sanitize = true; 2280 break; 2281 } 2282 if (sanitize) { 2283 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; 2284 int soff = (-spi - 1) * BPF_REG_SIZE; 2285 2286 /* detected reuse of integer stack slot with a pointer 2287 * which means either llvm is reusing stack slot or 2288 * an attacker is trying to exploit CVE-2018-3639 2289 * (speculative store bypass) 2290 * Have to sanitize that slot with preemptive 2291 * store of zero. 2292 */ 2293 if (*poff && *poff != soff) { 2294 /* disallow programs where single insn stores 2295 * into two different stack slots, since verifier 2296 * cannot sanitize them 2297 */ 2298 verbose(env, 2299 "insn %d cannot access two stack slots fp%d and fp%d", 2300 insn_idx, *poff, soff); 2301 return -EINVAL; 2302 } 2303 *poff = soff; 2304 } 2305 } 2306 save_register_state(state, spi, reg); 2307 } else { 2308 u8 type = STACK_MISC; 2309 2310 /* regular write of data into stack destroys any spilled ptr */ 2311 state->stack[spi].spilled_ptr.type = NOT_INIT; 2312 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 2313 if (state->stack[spi].slot_type[0] == STACK_SPILL) 2314 for (i = 0; i < BPF_REG_SIZE; i++) 2315 state->stack[spi].slot_type[i] = STACK_MISC; 2316 2317 /* only mark the slot as written if all 8 bytes were written 2318 * otherwise read propagation may incorrectly stop too soon 2319 * when stack slots are partially written. 2320 * This heuristic means that read propagation will be 2321 * conservative, since it will add reg_live_read marks 2322 * to stack slots all the way to first state when programs 2323 * writes+reads less than 8 bytes 2324 */ 2325 if (size == BPF_REG_SIZE) 2326 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2327 2328 /* when we zero initialize stack slots mark them as such */ 2329 if (reg && register_is_null(reg)) { 2330 /* backtracking doesn't work for STACK_ZERO yet. */ 2331 err = mark_chain_precision(env, value_regno); 2332 if (err) 2333 return err; 2334 type = STACK_ZERO; 2335 } 2336 2337 /* Mark slots affected by this stack write. */ 2338 for (i = 0; i < size; i++) 2339 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 2340 type; 2341 } 2342 return 0; 2343 } 2344 2345 static int check_stack_read(struct bpf_verifier_env *env, 2346 struct bpf_func_state *reg_state /* func where register points to */, 2347 int off, int size, int value_regno) 2348 { 2349 struct bpf_verifier_state *vstate = env->cur_state; 2350 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2351 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 2352 struct bpf_reg_state *reg; 2353 u8 *stype; 2354 2355 if (reg_state->allocated_stack <= slot) { 2356 verbose(env, "invalid read from stack off %d+0 size %d\n", 2357 off, size); 2358 return -EACCES; 2359 } 2360 stype = reg_state->stack[spi].slot_type; 2361 reg = ®_state->stack[spi].spilled_ptr; 2362 2363 if (stype[0] == STACK_SPILL) { 2364 if (size != BPF_REG_SIZE) { 2365 if (reg->type != SCALAR_VALUE) { 2366 verbose_linfo(env, env->insn_idx, "; "); 2367 verbose(env, "invalid size of register fill\n"); 2368 return -EACCES; 2369 } 2370 if (value_regno >= 0) { 2371 mark_reg_unknown(env, state->regs, value_regno); 2372 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2373 } 2374 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2375 return 0; 2376 } 2377 for (i = 1; i < BPF_REG_SIZE; i++) { 2378 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { 2379 verbose(env, "corrupted spill memory\n"); 2380 return -EACCES; 2381 } 2382 } 2383 2384 if (value_regno >= 0) { 2385 /* restore register state from stack */ 2386 state->regs[value_regno] = *reg; 2387 /* mark reg as written since spilled pointer state likely 2388 * has its liveness marks cleared by is_state_visited() 2389 * which resets stack/reg liveness for state transitions 2390 */ 2391 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2392 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { 2393 /* If value_regno==-1, the caller is asking us whether 2394 * it is acceptable to use this value as a SCALAR_VALUE 2395 * (e.g. for XADD). 2396 * We must not allow unprivileged callers to do that 2397 * with spilled pointers. 2398 */ 2399 verbose(env, "leaking pointer from stack off %d\n", 2400 off); 2401 return -EACCES; 2402 } 2403 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2404 } else { 2405 int zeros = 0; 2406 2407 for (i = 0; i < size; i++) { 2408 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) 2409 continue; 2410 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { 2411 zeros++; 2412 continue; 2413 } 2414 verbose(env, "invalid read from stack off %d+%d size %d\n", 2415 off, i, size); 2416 return -EACCES; 2417 } 2418 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2419 if (value_regno >= 0) { 2420 if (zeros == size) { 2421 /* any size read into register is zero extended, 2422 * so the whole register == const_zero 2423 */ 2424 __mark_reg_const_zero(&state->regs[value_regno]); 2425 /* backtracking doesn't support STACK_ZERO yet, 2426 * so mark it precise here, so that later 2427 * backtracking can stop here. 2428 * Backtracking may not need this if this register 2429 * doesn't participate in pointer adjustment. 2430 * Forward propagation of precise flag is not 2431 * necessary either. This mark is only to stop 2432 * backtracking. Any register that contributed 2433 * to const 0 was marked precise before spill. 2434 */ 2435 state->regs[value_regno].precise = true; 2436 } else { 2437 /* have read misc data from the stack */ 2438 mark_reg_unknown(env, state->regs, value_regno); 2439 } 2440 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2441 } 2442 } 2443 return 0; 2444 } 2445 2446 static int check_stack_access(struct bpf_verifier_env *env, 2447 const struct bpf_reg_state *reg, 2448 int off, int size) 2449 { 2450 /* Stack accesses must be at a fixed offset, so that we 2451 * can determine what type of data were returned. See 2452 * check_stack_read(). 2453 */ 2454 if (!tnum_is_const(reg->var_off)) { 2455 char tn_buf[48]; 2456 2457 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2458 verbose(env, "variable stack access var_off=%s off=%d size=%d\n", 2459 tn_buf, off, size); 2460 return -EACCES; 2461 } 2462 2463 if (off >= 0 || off < -MAX_BPF_STACK) { 2464 verbose(env, "invalid stack off=%d size=%d\n", off, size); 2465 return -EACCES; 2466 } 2467 2468 return 0; 2469 } 2470 2471 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 2472 int off, int size, enum bpf_access_type type) 2473 { 2474 struct bpf_reg_state *regs = cur_regs(env); 2475 struct bpf_map *map = regs[regno].map_ptr; 2476 u32 cap = bpf_map_flags_to_cap(map); 2477 2478 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 2479 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 2480 map->value_size, off, size); 2481 return -EACCES; 2482 } 2483 2484 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 2485 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 2486 map->value_size, off, size); 2487 return -EACCES; 2488 } 2489 2490 return 0; 2491 } 2492 2493 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 2494 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 2495 int off, int size, u32 mem_size, 2496 bool zero_size_allowed) 2497 { 2498 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 2499 struct bpf_reg_state *reg; 2500 2501 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 2502 return 0; 2503 2504 reg = &cur_regs(env)[regno]; 2505 switch (reg->type) { 2506 case PTR_TO_MAP_VALUE: 2507 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 2508 mem_size, off, size); 2509 break; 2510 case PTR_TO_PACKET: 2511 case PTR_TO_PACKET_META: 2512 case PTR_TO_PACKET_END: 2513 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 2514 off, size, regno, reg->id, off, mem_size); 2515 break; 2516 case PTR_TO_MEM: 2517 default: 2518 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 2519 mem_size, off, size); 2520 } 2521 2522 return -EACCES; 2523 } 2524 2525 /* check read/write into a memory region with possible variable offset */ 2526 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 2527 int off, int size, u32 mem_size, 2528 bool zero_size_allowed) 2529 { 2530 struct bpf_verifier_state *vstate = env->cur_state; 2531 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2532 struct bpf_reg_state *reg = &state->regs[regno]; 2533 int err; 2534 2535 /* We may have adjusted the register pointing to memory region, so we 2536 * need to try adding each of min_value and max_value to off 2537 * to make sure our theoretical access will be safe. 2538 */ 2539 if (env->log.level & BPF_LOG_LEVEL) 2540 print_verifier_state(env, state); 2541 2542 /* The minimum value is only important with signed 2543 * comparisons where we can't assume the floor of a 2544 * value is 0. If we are using signed variables for our 2545 * index'es we need to make sure that whatever we use 2546 * will have a set floor within our range. 2547 */ 2548 if (reg->smin_value < 0 && 2549 (reg->smin_value == S64_MIN || 2550 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 2551 reg->smin_value + off < 0)) { 2552 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2553 regno); 2554 return -EACCES; 2555 } 2556 err = __check_mem_access(env, regno, reg->smin_value + off, size, 2557 mem_size, zero_size_allowed); 2558 if (err) { 2559 verbose(env, "R%d min value is outside of the allowed memory range\n", 2560 regno); 2561 return err; 2562 } 2563 2564 /* If we haven't set a max value then we need to bail since we can't be 2565 * sure we won't do bad things. 2566 * If reg->umax_value + off could overflow, treat that as unbounded too. 2567 */ 2568 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 2569 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 2570 regno); 2571 return -EACCES; 2572 } 2573 err = __check_mem_access(env, regno, reg->umax_value + off, size, 2574 mem_size, zero_size_allowed); 2575 if (err) { 2576 verbose(env, "R%d max value is outside of the allowed memory range\n", 2577 regno); 2578 return err; 2579 } 2580 2581 return 0; 2582 } 2583 2584 /* check read/write into a map element with possible variable offset */ 2585 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 2586 int off, int size, bool zero_size_allowed) 2587 { 2588 struct bpf_verifier_state *vstate = env->cur_state; 2589 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2590 struct bpf_reg_state *reg = &state->regs[regno]; 2591 struct bpf_map *map = reg->map_ptr; 2592 int err; 2593 2594 err = check_mem_region_access(env, regno, off, size, map->value_size, 2595 zero_size_allowed); 2596 if (err) 2597 return err; 2598 2599 if (map_value_has_spin_lock(map)) { 2600 u32 lock = map->spin_lock_off; 2601 2602 /* if any part of struct bpf_spin_lock can be touched by 2603 * load/store reject this program. 2604 * To check that [x1, x2) overlaps with [y1, y2) 2605 * it is sufficient to check x1 < y2 && y1 < x2. 2606 */ 2607 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 2608 lock < reg->umax_value + off + size) { 2609 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 2610 return -EACCES; 2611 } 2612 } 2613 return err; 2614 } 2615 2616 #define MAX_PACKET_OFF 0xffff 2617 2618 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 2619 const struct bpf_call_arg_meta *meta, 2620 enum bpf_access_type t) 2621 { 2622 switch (env->prog->type) { 2623 /* Program types only with direct read access go here! */ 2624 case BPF_PROG_TYPE_LWT_IN: 2625 case BPF_PROG_TYPE_LWT_OUT: 2626 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2627 case BPF_PROG_TYPE_SK_REUSEPORT: 2628 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2629 case BPF_PROG_TYPE_CGROUP_SKB: 2630 if (t == BPF_WRITE) 2631 return false; 2632 /* fallthrough */ 2633 2634 /* Program types with direct read + write access go here! */ 2635 case BPF_PROG_TYPE_SCHED_CLS: 2636 case BPF_PROG_TYPE_SCHED_ACT: 2637 case BPF_PROG_TYPE_XDP: 2638 case BPF_PROG_TYPE_LWT_XMIT: 2639 case BPF_PROG_TYPE_SK_SKB: 2640 case BPF_PROG_TYPE_SK_MSG: 2641 if (meta) 2642 return meta->pkt_access; 2643 2644 env->seen_direct_write = true; 2645 return true; 2646 2647 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2648 if (t == BPF_WRITE) 2649 env->seen_direct_write = true; 2650 2651 return true; 2652 2653 default: 2654 return false; 2655 } 2656 } 2657 2658 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 2659 int size, bool zero_size_allowed) 2660 { 2661 struct bpf_reg_state *regs = cur_regs(env); 2662 struct bpf_reg_state *reg = ®s[regno]; 2663 int err; 2664 2665 /* We may have added a variable offset to the packet pointer; but any 2666 * reg->range we have comes after that. We are only checking the fixed 2667 * offset. 2668 */ 2669 2670 /* We don't allow negative numbers, because we aren't tracking enough 2671 * detail to prove they're safe. 2672 */ 2673 if (reg->smin_value < 0) { 2674 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2675 regno); 2676 return -EACCES; 2677 } 2678 err = __check_mem_access(env, regno, off, size, reg->range, 2679 zero_size_allowed); 2680 if (err) { 2681 verbose(env, "R%d offset is outside of the packet\n", regno); 2682 return err; 2683 } 2684 2685 /* __check_mem_access has made sure "off + size - 1" is within u16. 2686 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, 2687 * otherwise find_good_pkt_pointers would have refused to set range info 2688 * that __check_mem_access would have rejected this pkt access. 2689 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 2690 */ 2691 env->prog->aux->max_pkt_offset = 2692 max_t(u32, env->prog->aux->max_pkt_offset, 2693 off + reg->umax_value + size - 1); 2694 2695 return err; 2696 } 2697 2698 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 2699 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 2700 enum bpf_access_type t, enum bpf_reg_type *reg_type, 2701 u32 *btf_id) 2702 { 2703 struct bpf_insn_access_aux info = { 2704 .reg_type = *reg_type, 2705 .log = &env->log, 2706 }; 2707 2708 if (env->ops->is_valid_access && 2709 env->ops->is_valid_access(off, size, t, env->prog, &info)) { 2710 /* A non zero info.ctx_field_size indicates that this field is a 2711 * candidate for later verifier transformation to load the whole 2712 * field and then apply a mask when accessed with a narrower 2713 * access than actual ctx access size. A zero info.ctx_field_size 2714 * will only allow for whole field access and rejects any other 2715 * type of narrower access. 2716 */ 2717 *reg_type = info.reg_type; 2718 2719 if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) 2720 *btf_id = info.btf_id; 2721 else 2722 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 2723 /* remember the offset of last byte accessed in ctx */ 2724 if (env->prog->aux->max_ctx_offset < off + size) 2725 env->prog->aux->max_ctx_offset = off + size; 2726 return 0; 2727 } 2728 2729 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 2730 return -EACCES; 2731 } 2732 2733 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 2734 int size) 2735 { 2736 if (size < 0 || off < 0 || 2737 (u64)off + size > sizeof(struct bpf_flow_keys)) { 2738 verbose(env, "invalid access to flow keys off=%d size=%d\n", 2739 off, size); 2740 return -EACCES; 2741 } 2742 return 0; 2743 } 2744 2745 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 2746 u32 regno, int off, int size, 2747 enum bpf_access_type t) 2748 { 2749 struct bpf_reg_state *regs = cur_regs(env); 2750 struct bpf_reg_state *reg = ®s[regno]; 2751 struct bpf_insn_access_aux info = {}; 2752 bool valid; 2753 2754 if (reg->smin_value < 0) { 2755 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2756 regno); 2757 return -EACCES; 2758 } 2759 2760 switch (reg->type) { 2761 case PTR_TO_SOCK_COMMON: 2762 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 2763 break; 2764 case PTR_TO_SOCKET: 2765 valid = bpf_sock_is_valid_access(off, size, t, &info); 2766 break; 2767 case PTR_TO_TCP_SOCK: 2768 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 2769 break; 2770 case PTR_TO_XDP_SOCK: 2771 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 2772 break; 2773 default: 2774 valid = false; 2775 } 2776 2777 2778 if (valid) { 2779 env->insn_aux_data[insn_idx].ctx_field_size = 2780 info.ctx_field_size; 2781 return 0; 2782 } 2783 2784 verbose(env, "R%d invalid %s access off=%d size=%d\n", 2785 regno, reg_type_str[reg->type], off, size); 2786 2787 return -EACCES; 2788 } 2789 2790 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 2791 { 2792 return cur_regs(env) + regno; 2793 } 2794 2795 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 2796 { 2797 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 2798 } 2799 2800 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 2801 { 2802 const struct bpf_reg_state *reg = reg_state(env, regno); 2803 2804 return reg->type == PTR_TO_CTX; 2805 } 2806 2807 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 2808 { 2809 const struct bpf_reg_state *reg = reg_state(env, regno); 2810 2811 return type_is_sk_pointer(reg->type); 2812 } 2813 2814 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 2815 { 2816 const struct bpf_reg_state *reg = reg_state(env, regno); 2817 2818 return type_is_pkt_pointer(reg->type); 2819 } 2820 2821 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 2822 { 2823 const struct bpf_reg_state *reg = reg_state(env, regno); 2824 2825 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ 2826 return reg->type == PTR_TO_FLOW_KEYS; 2827 } 2828 2829 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 2830 const struct bpf_reg_state *reg, 2831 int off, int size, bool strict) 2832 { 2833 struct tnum reg_off; 2834 int ip_align; 2835 2836 /* Byte size accesses are always allowed. */ 2837 if (!strict || size == 1) 2838 return 0; 2839 2840 /* For platforms that do not have a Kconfig enabling 2841 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 2842 * NET_IP_ALIGN is universally set to '2'. And on platforms 2843 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 2844 * to this code only in strict mode where we want to emulate 2845 * the NET_IP_ALIGN==2 checking. Therefore use an 2846 * unconditional IP align value of '2'. 2847 */ 2848 ip_align = 2; 2849 2850 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 2851 if (!tnum_is_aligned(reg_off, size)) { 2852 char tn_buf[48]; 2853 2854 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2855 verbose(env, 2856 "misaligned packet access off %d+%s+%d+%d size %d\n", 2857 ip_align, tn_buf, reg->off, off, size); 2858 return -EACCES; 2859 } 2860 2861 return 0; 2862 } 2863 2864 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 2865 const struct bpf_reg_state *reg, 2866 const char *pointer_desc, 2867 int off, int size, bool strict) 2868 { 2869 struct tnum reg_off; 2870 2871 /* Byte size accesses are always allowed. */ 2872 if (!strict || size == 1) 2873 return 0; 2874 2875 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 2876 if (!tnum_is_aligned(reg_off, size)) { 2877 char tn_buf[48]; 2878 2879 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2880 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 2881 pointer_desc, tn_buf, reg->off, off, size); 2882 return -EACCES; 2883 } 2884 2885 return 0; 2886 } 2887 2888 static int check_ptr_alignment(struct bpf_verifier_env *env, 2889 const struct bpf_reg_state *reg, int off, 2890 int size, bool strict_alignment_once) 2891 { 2892 bool strict = env->strict_alignment || strict_alignment_once; 2893 const char *pointer_desc = ""; 2894 2895 switch (reg->type) { 2896 case PTR_TO_PACKET: 2897 case PTR_TO_PACKET_META: 2898 /* Special case, because of NET_IP_ALIGN. Given metadata sits 2899 * right in front, treat it the very same way. 2900 */ 2901 return check_pkt_ptr_alignment(env, reg, off, size, strict); 2902 case PTR_TO_FLOW_KEYS: 2903 pointer_desc = "flow keys "; 2904 break; 2905 case PTR_TO_MAP_VALUE: 2906 pointer_desc = "value "; 2907 break; 2908 case PTR_TO_CTX: 2909 pointer_desc = "context "; 2910 break; 2911 case PTR_TO_STACK: 2912 pointer_desc = "stack "; 2913 /* The stack spill tracking logic in check_stack_write() 2914 * and check_stack_read() relies on stack accesses being 2915 * aligned. 2916 */ 2917 strict = true; 2918 break; 2919 case PTR_TO_SOCKET: 2920 pointer_desc = "sock "; 2921 break; 2922 case PTR_TO_SOCK_COMMON: 2923 pointer_desc = "sock_common "; 2924 break; 2925 case PTR_TO_TCP_SOCK: 2926 pointer_desc = "tcp_sock "; 2927 break; 2928 case PTR_TO_XDP_SOCK: 2929 pointer_desc = "xdp_sock "; 2930 break; 2931 default: 2932 break; 2933 } 2934 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 2935 strict); 2936 } 2937 2938 static int update_stack_depth(struct bpf_verifier_env *env, 2939 const struct bpf_func_state *func, 2940 int off) 2941 { 2942 u16 stack = env->subprog_info[func->subprogno].stack_depth; 2943 2944 if (stack >= -off) 2945 return 0; 2946 2947 /* update known max for given subprogram */ 2948 env->subprog_info[func->subprogno].stack_depth = -off; 2949 return 0; 2950 } 2951 2952 /* starting from main bpf function walk all instructions of the function 2953 * and recursively walk all callees that given function can call. 2954 * Ignore jump and exit insns. 2955 * Since recursion is prevented by check_cfg() this algorithm 2956 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 2957 */ 2958 static int check_max_stack_depth(struct bpf_verifier_env *env) 2959 { 2960 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 2961 struct bpf_subprog_info *subprog = env->subprog_info; 2962 struct bpf_insn *insn = env->prog->insnsi; 2963 int ret_insn[MAX_CALL_FRAMES]; 2964 int ret_prog[MAX_CALL_FRAMES]; 2965 2966 process_func: 2967 /* round up to 32-bytes, since this is granularity 2968 * of interpreter stack size 2969 */ 2970 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2971 if (depth > MAX_BPF_STACK) { 2972 verbose(env, "combined stack size of %d calls is %d. Too large\n", 2973 frame + 1, depth); 2974 return -EACCES; 2975 } 2976 continue_func: 2977 subprog_end = subprog[idx + 1].start; 2978 for (; i < subprog_end; i++) { 2979 if (insn[i].code != (BPF_JMP | BPF_CALL)) 2980 continue; 2981 if (insn[i].src_reg != BPF_PSEUDO_CALL) 2982 continue; 2983 /* remember insn and function to return to */ 2984 ret_insn[frame] = i + 1; 2985 ret_prog[frame] = idx; 2986 2987 /* find the callee */ 2988 i = i + insn[i].imm + 1; 2989 idx = find_subprog(env, i); 2990 if (idx < 0) { 2991 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2992 i); 2993 return -EFAULT; 2994 } 2995 frame++; 2996 if (frame >= MAX_CALL_FRAMES) { 2997 verbose(env, "the call stack of %d frames is too deep !\n", 2998 frame); 2999 return -E2BIG; 3000 } 3001 goto process_func; 3002 } 3003 /* end of for() loop means the last insn of the 'subprog' 3004 * was reached. Doesn't matter whether it was JA or EXIT 3005 */ 3006 if (frame == 0) 3007 return 0; 3008 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3009 frame--; 3010 i = ret_insn[frame]; 3011 idx = ret_prog[frame]; 3012 goto continue_func; 3013 } 3014 3015 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 3016 static int get_callee_stack_depth(struct bpf_verifier_env *env, 3017 const struct bpf_insn *insn, int idx) 3018 { 3019 int start = idx + insn->imm + 1, subprog; 3020 3021 subprog = find_subprog(env, start); 3022 if (subprog < 0) { 3023 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 3024 start); 3025 return -EFAULT; 3026 } 3027 return env->subprog_info[subprog].stack_depth; 3028 } 3029 #endif 3030 3031 int check_ctx_reg(struct bpf_verifier_env *env, 3032 const struct bpf_reg_state *reg, int regno) 3033 { 3034 /* Access to ctx or passing it to a helper is only allowed in 3035 * its original, unmodified form. 3036 */ 3037 3038 if (reg->off) { 3039 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", 3040 regno, reg->off); 3041 return -EACCES; 3042 } 3043 3044 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3045 char tn_buf[48]; 3046 3047 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3048 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); 3049 return -EACCES; 3050 } 3051 3052 return 0; 3053 } 3054 3055 static int check_tp_buffer_access(struct bpf_verifier_env *env, 3056 const struct bpf_reg_state *reg, 3057 int regno, int off, int size) 3058 { 3059 if (off < 0) { 3060 verbose(env, 3061 "R%d invalid tracepoint buffer access: off=%d, size=%d", 3062 regno, off, size); 3063 return -EACCES; 3064 } 3065 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3066 char tn_buf[48]; 3067 3068 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3069 verbose(env, 3070 "R%d invalid variable buffer offset: off=%d, var_off=%s", 3071 regno, off, tn_buf); 3072 return -EACCES; 3073 } 3074 if (off + size > env->prog->aux->max_tp_access) 3075 env->prog->aux->max_tp_access = off + size; 3076 3077 return 0; 3078 } 3079 3080 /* BPF architecture zero extends alu32 ops into 64-bit registesr */ 3081 static void zext_32_to_64(struct bpf_reg_state *reg) 3082 { 3083 reg->var_off = tnum_subreg(reg->var_off); 3084 __reg_assign_32_into_64(reg); 3085 } 3086 3087 /* truncate register to smaller size (in bytes) 3088 * must be called with size < BPF_REG_SIZE 3089 */ 3090 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 3091 { 3092 u64 mask; 3093 3094 /* clear high bits in bit representation */ 3095 reg->var_off = tnum_cast(reg->var_off, size); 3096 3097 /* fix arithmetic bounds */ 3098 mask = ((u64)1 << (size * 8)) - 1; 3099 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 3100 reg->umin_value &= mask; 3101 reg->umax_value &= mask; 3102 } else { 3103 reg->umin_value = 0; 3104 reg->umax_value = mask; 3105 } 3106 reg->smin_value = reg->umin_value; 3107 reg->smax_value = reg->umax_value; 3108 3109 /* If size is smaller than 32bit register the 32bit register 3110 * values are also truncated so we push 64-bit bounds into 3111 * 32-bit bounds. Above were truncated < 32-bits already. 3112 */ 3113 if (size >= 4) 3114 return; 3115 __reg_combine_64_into_32(reg); 3116 } 3117 3118 static bool bpf_map_is_rdonly(const struct bpf_map *map) 3119 { 3120 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; 3121 } 3122 3123 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 3124 { 3125 void *ptr; 3126 u64 addr; 3127 int err; 3128 3129 err = map->ops->map_direct_value_addr(map, &addr, off); 3130 if (err) 3131 return err; 3132 ptr = (void *)(long)addr + off; 3133 3134 switch (size) { 3135 case sizeof(u8): 3136 *val = (u64)*(u8 *)ptr; 3137 break; 3138 case sizeof(u16): 3139 *val = (u64)*(u16 *)ptr; 3140 break; 3141 case sizeof(u32): 3142 *val = (u64)*(u32 *)ptr; 3143 break; 3144 case sizeof(u64): 3145 *val = *(u64 *)ptr; 3146 break; 3147 default: 3148 return -EINVAL; 3149 } 3150 return 0; 3151 } 3152 3153 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 3154 struct bpf_reg_state *regs, 3155 int regno, int off, int size, 3156 enum bpf_access_type atype, 3157 int value_regno) 3158 { 3159 struct bpf_reg_state *reg = regs + regno; 3160 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); 3161 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off); 3162 u32 btf_id; 3163 int ret; 3164 3165 if (off < 0) { 3166 verbose(env, 3167 "R%d is ptr_%s invalid negative access: off=%d\n", 3168 regno, tname, off); 3169 return -EACCES; 3170 } 3171 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3172 char tn_buf[48]; 3173 3174 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3175 verbose(env, 3176 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 3177 regno, tname, off, tn_buf); 3178 return -EACCES; 3179 } 3180 3181 if (env->ops->btf_struct_access) { 3182 ret = env->ops->btf_struct_access(&env->log, t, off, size, 3183 atype, &btf_id); 3184 } else { 3185 if (atype != BPF_READ) { 3186 verbose(env, "only read is supported\n"); 3187 return -EACCES; 3188 } 3189 3190 ret = btf_struct_access(&env->log, t, off, size, atype, 3191 &btf_id); 3192 } 3193 3194 if (ret < 0) 3195 return ret; 3196 3197 if (atype == BPF_READ && value_regno >= 0) 3198 mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); 3199 3200 return 0; 3201 } 3202 3203 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 3204 struct bpf_reg_state *regs, 3205 int regno, int off, int size, 3206 enum bpf_access_type atype, 3207 int value_regno) 3208 { 3209 struct bpf_reg_state *reg = regs + regno; 3210 struct bpf_map *map = reg->map_ptr; 3211 const struct btf_type *t; 3212 const char *tname; 3213 u32 btf_id; 3214 int ret; 3215 3216 if (!btf_vmlinux) { 3217 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 3218 return -ENOTSUPP; 3219 } 3220 3221 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 3222 verbose(env, "map_ptr access not supported for map type %d\n", 3223 map->map_type); 3224 return -ENOTSUPP; 3225 } 3226 3227 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 3228 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 3229 3230 if (!env->allow_ptr_to_map_access) { 3231 verbose(env, 3232 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 3233 tname); 3234 return -EPERM; 3235 } 3236 3237 if (off < 0) { 3238 verbose(env, "R%d is %s invalid negative access: off=%d\n", 3239 regno, tname, off); 3240 return -EACCES; 3241 } 3242 3243 if (atype != BPF_READ) { 3244 verbose(env, "only read from %s is supported\n", tname); 3245 return -EACCES; 3246 } 3247 3248 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); 3249 if (ret < 0) 3250 return ret; 3251 3252 if (value_regno >= 0) 3253 mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); 3254 3255 return 0; 3256 } 3257 3258 3259 /* check whether memory at (regno + off) is accessible for t = (read | write) 3260 * if t==write, value_regno is a register which value is stored into memory 3261 * if t==read, value_regno is a register which will receive the value from memory 3262 * if t==write && value_regno==-1, some unknown value is stored into memory 3263 * if t==read && value_regno==-1, don't care what we read from memory 3264 */ 3265 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 3266 int off, int bpf_size, enum bpf_access_type t, 3267 int value_regno, bool strict_alignment_once) 3268 { 3269 struct bpf_reg_state *regs = cur_regs(env); 3270 struct bpf_reg_state *reg = regs + regno; 3271 struct bpf_func_state *state; 3272 int size, err = 0; 3273 3274 size = bpf_size_to_bytes(bpf_size); 3275 if (size < 0) 3276 return size; 3277 3278 /* alignment checks will add in reg->off themselves */ 3279 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 3280 if (err) 3281 return err; 3282 3283 /* for access checks, reg->off is just part of off */ 3284 off += reg->off; 3285 3286 if (reg->type == PTR_TO_MAP_VALUE) { 3287 if (t == BPF_WRITE && value_regno >= 0 && 3288 is_pointer_value(env, value_regno)) { 3289 verbose(env, "R%d leaks addr into map\n", value_regno); 3290 return -EACCES; 3291 } 3292 err = check_map_access_type(env, regno, off, size, t); 3293 if (err) 3294 return err; 3295 err = check_map_access(env, regno, off, size, false); 3296 if (!err && t == BPF_READ && value_regno >= 0) { 3297 struct bpf_map *map = reg->map_ptr; 3298 3299 /* if map is read-only, track its contents as scalars */ 3300 if (tnum_is_const(reg->var_off) && 3301 bpf_map_is_rdonly(map) && 3302 map->ops->map_direct_value_addr) { 3303 int map_off = off + reg->var_off.value; 3304 u64 val = 0; 3305 3306 err = bpf_map_direct_read(map, map_off, size, 3307 &val); 3308 if (err) 3309 return err; 3310 3311 regs[value_regno].type = SCALAR_VALUE; 3312 __mark_reg_known(®s[value_regno], val); 3313 } else { 3314 mark_reg_unknown(env, regs, value_regno); 3315 } 3316 } 3317 } else if (reg->type == PTR_TO_MEM) { 3318 if (t == BPF_WRITE && value_regno >= 0 && 3319 is_pointer_value(env, value_regno)) { 3320 verbose(env, "R%d leaks addr into mem\n", value_regno); 3321 return -EACCES; 3322 } 3323 err = check_mem_region_access(env, regno, off, size, 3324 reg->mem_size, false); 3325 if (!err && t == BPF_READ && value_regno >= 0) 3326 mark_reg_unknown(env, regs, value_regno); 3327 } else if (reg->type == PTR_TO_CTX) { 3328 enum bpf_reg_type reg_type = SCALAR_VALUE; 3329 u32 btf_id = 0; 3330 3331 if (t == BPF_WRITE && value_regno >= 0 && 3332 is_pointer_value(env, value_regno)) { 3333 verbose(env, "R%d leaks addr into ctx\n", value_regno); 3334 return -EACCES; 3335 } 3336 3337 err = check_ctx_reg(env, reg, regno); 3338 if (err < 0) 3339 return err; 3340 3341 err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf_id); 3342 if (err) 3343 verbose_linfo(env, insn_idx, "; "); 3344 if (!err && t == BPF_READ && value_regno >= 0) { 3345 /* ctx access returns either a scalar, or a 3346 * PTR_TO_PACKET[_META,_END]. In the latter 3347 * case, we know the offset is zero. 3348 */ 3349 if (reg_type == SCALAR_VALUE) { 3350 mark_reg_unknown(env, regs, value_regno); 3351 } else { 3352 mark_reg_known_zero(env, regs, 3353 value_regno); 3354 if (reg_type_may_be_null(reg_type)) 3355 regs[value_regno].id = ++env->id_gen; 3356 /* A load of ctx field could have different 3357 * actual load size with the one encoded in the 3358 * insn. When the dst is PTR, it is for sure not 3359 * a sub-register. 3360 */ 3361 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 3362 if (reg_type == PTR_TO_BTF_ID || 3363 reg_type == PTR_TO_BTF_ID_OR_NULL) 3364 regs[value_regno].btf_id = btf_id; 3365 } 3366 regs[value_regno].type = reg_type; 3367 } 3368 3369 } else if (reg->type == PTR_TO_STACK) { 3370 off += reg->var_off.value; 3371 err = check_stack_access(env, reg, off, size); 3372 if (err) 3373 return err; 3374 3375 state = func(env, reg); 3376 err = update_stack_depth(env, state, off); 3377 if (err) 3378 return err; 3379 3380 if (t == BPF_WRITE) 3381 err = check_stack_write(env, state, off, size, 3382 value_regno, insn_idx); 3383 else 3384 err = check_stack_read(env, state, off, size, 3385 value_regno); 3386 } else if (reg_is_pkt_pointer(reg)) { 3387 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 3388 verbose(env, "cannot write into packet\n"); 3389 return -EACCES; 3390 } 3391 if (t == BPF_WRITE && value_regno >= 0 && 3392 is_pointer_value(env, value_regno)) { 3393 verbose(env, "R%d leaks addr into packet\n", 3394 value_regno); 3395 return -EACCES; 3396 } 3397 err = check_packet_access(env, regno, off, size, false); 3398 if (!err && t == BPF_READ && value_regno >= 0) 3399 mark_reg_unknown(env, regs, value_regno); 3400 } else if (reg->type == PTR_TO_FLOW_KEYS) { 3401 if (t == BPF_WRITE && value_regno >= 0 && 3402 is_pointer_value(env, value_regno)) { 3403 verbose(env, "R%d leaks addr into flow keys\n", 3404 value_regno); 3405 return -EACCES; 3406 } 3407 3408 err = check_flow_keys_access(env, off, size); 3409 if (!err && t == BPF_READ && value_regno >= 0) 3410 mark_reg_unknown(env, regs, value_regno); 3411 } else if (type_is_sk_pointer(reg->type)) { 3412 if (t == BPF_WRITE) { 3413 verbose(env, "R%d cannot write into %s\n", 3414 regno, reg_type_str[reg->type]); 3415 return -EACCES; 3416 } 3417 err = check_sock_access(env, insn_idx, regno, off, size, t); 3418 if (!err && value_regno >= 0) 3419 mark_reg_unknown(env, regs, value_regno); 3420 } else if (reg->type == PTR_TO_TP_BUFFER) { 3421 err = check_tp_buffer_access(env, reg, regno, off, size); 3422 if (!err && t == BPF_READ && value_regno >= 0) 3423 mark_reg_unknown(env, regs, value_regno); 3424 } else if (reg->type == PTR_TO_BTF_ID) { 3425 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 3426 value_regno); 3427 } else if (reg->type == CONST_PTR_TO_MAP) { 3428 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 3429 value_regno); 3430 } else { 3431 verbose(env, "R%d invalid mem access '%s'\n", regno, 3432 reg_type_str[reg->type]); 3433 return -EACCES; 3434 } 3435 3436 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 3437 regs[value_regno].type == SCALAR_VALUE) { 3438 /* b/h/w load zero-extends, mark upper bits as known 0 */ 3439 coerce_reg_to_size(®s[value_regno], size); 3440 } 3441 return err; 3442 } 3443 3444 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 3445 { 3446 int err; 3447 3448 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 3449 insn->imm != 0) { 3450 verbose(env, "BPF_XADD uses reserved fields\n"); 3451 return -EINVAL; 3452 } 3453 3454 /* check src1 operand */ 3455 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3456 if (err) 3457 return err; 3458 3459 /* check src2 operand */ 3460 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3461 if (err) 3462 return err; 3463 3464 if (is_pointer_value(env, insn->src_reg)) { 3465 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 3466 return -EACCES; 3467 } 3468 3469 if (is_ctx_reg(env, insn->dst_reg) || 3470 is_pkt_reg(env, insn->dst_reg) || 3471 is_flow_key_reg(env, insn->dst_reg) || 3472 is_sk_reg(env, insn->dst_reg)) { 3473 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", 3474 insn->dst_reg, 3475 reg_type_str[reg_state(env, insn->dst_reg)->type]); 3476 return -EACCES; 3477 } 3478 3479 /* check whether atomic_add can read the memory */ 3480 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3481 BPF_SIZE(insn->code), BPF_READ, -1, true); 3482 if (err) 3483 return err; 3484 3485 /* check whether atomic_add can write into the same memory */ 3486 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3487 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 3488 } 3489 3490 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, 3491 int off, int access_size, 3492 bool zero_size_allowed) 3493 { 3494 struct bpf_reg_state *reg = reg_state(env, regno); 3495 3496 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 3497 access_size < 0 || (access_size == 0 && !zero_size_allowed)) { 3498 if (tnum_is_const(reg->var_off)) { 3499 verbose(env, "invalid stack type R%d off=%d access_size=%d\n", 3500 regno, off, access_size); 3501 } else { 3502 char tn_buf[48]; 3503 3504 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3505 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", 3506 regno, tn_buf, access_size); 3507 } 3508 return -EACCES; 3509 } 3510 return 0; 3511 } 3512 3513 /* when register 'regno' is passed into function that will read 'access_size' 3514 * bytes from that pointer, make sure that it's within stack boundary 3515 * and all elements of stack are initialized. 3516 * Unlike most pointer bounds-checking functions, this one doesn't take an 3517 * 'off' argument, so it has to add in reg->off itself. 3518 */ 3519 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 3520 int access_size, bool zero_size_allowed, 3521 struct bpf_call_arg_meta *meta) 3522 { 3523 struct bpf_reg_state *reg = reg_state(env, regno); 3524 struct bpf_func_state *state = func(env, reg); 3525 int err, min_off, max_off, i, j, slot, spi; 3526 3527 if (reg->type != PTR_TO_STACK) { 3528 /* Allow zero-byte read from NULL, regardless of pointer type */ 3529 if (zero_size_allowed && access_size == 0 && 3530 register_is_null(reg)) 3531 return 0; 3532 3533 verbose(env, "R%d type=%s expected=%s\n", regno, 3534 reg_type_str[reg->type], 3535 reg_type_str[PTR_TO_STACK]); 3536 return -EACCES; 3537 } 3538 3539 if (tnum_is_const(reg->var_off)) { 3540 min_off = max_off = reg->var_off.value + reg->off; 3541 err = __check_stack_boundary(env, regno, min_off, access_size, 3542 zero_size_allowed); 3543 if (err) 3544 return err; 3545 } else { 3546 /* Variable offset is prohibited for unprivileged mode for 3547 * simplicity since it requires corresponding support in 3548 * Spectre masking for stack ALU. 3549 * See also retrieve_ptr_limit(). 3550 */ 3551 if (!env->bypass_spec_v1) { 3552 char tn_buf[48]; 3553 3554 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3555 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", 3556 regno, tn_buf); 3557 return -EACCES; 3558 } 3559 /* Only initialized buffer on stack is allowed to be accessed 3560 * with variable offset. With uninitialized buffer it's hard to 3561 * guarantee that whole memory is marked as initialized on 3562 * helper return since specific bounds are unknown what may 3563 * cause uninitialized stack leaking. 3564 */ 3565 if (meta && meta->raw_mode) 3566 meta = NULL; 3567 3568 if (reg->smax_value >= BPF_MAX_VAR_OFF || 3569 reg->smax_value <= -BPF_MAX_VAR_OFF) { 3570 verbose(env, "R%d unbounded indirect variable offset stack access\n", 3571 regno); 3572 return -EACCES; 3573 } 3574 min_off = reg->smin_value + reg->off; 3575 max_off = reg->smax_value + reg->off; 3576 err = __check_stack_boundary(env, regno, min_off, access_size, 3577 zero_size_allowed); 3578 if (err) { 3579 verbose(env, "R%d min value is outside of stack bound\n", 3580 regno); 3581 return err; 3582 } 3583 err = __check_stack_boundary(env, regno, max_off, access_size, 3584 zero_size_allowed); 3585 if (err) { 3586 verbose(env, "R%d max value is outside of stack bound\n", 3587 regno); 3588 return err; 3589 } 3590 } 3591 3592 if (meta && meta->raw_mode) { 3593 meta->access_size = access_size; 3594 meta->regno = regno; 3595 return 0; 3596 } 3597 3598 for (i = min_off; i < max_off + access_size; i++) { 3599 u8 *stype; 3600 3601 slot = -i - 1; 3602 spi = slot / BPF_REG_SIZE; 3603 if (state->allocated_stack <= slot) 3604 goto err; 3605 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 3606 if (*stype == STACK_MISC) 3607 goto mark; 3608 if (*stype == STACK_ZERO) { 3609 /* helper can write anything into the stack */ 3610 *stype = STACK_MISC; 3611 goto mark; 3612 } 3613 3614 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3615 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) 3616 goto mark; 3617 3618 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3619 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { 3620 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 3621 for (j = 0; j < BPF_REG_SIZE; j++) 3622 state->stack[spi].slot_type[j] = STACK_MISC; 3623 goto mark; 3624 } 3625 3626 err: 3627 if (tnum_is_const(reg->var_off)) { 3628 verbose(env, "invalid indirect read from stack off %d+%d size %d\n", 3629 min_off, i - min_off, access_size); 3630 } else { 3631 char tn_buf[48]; 3632 3633 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3634 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", 3635 tn_buf, i - min_off, access_size); 3636 } 3637 return -EACCES; 3638 mark: 3639 /* reading any byte out of 8-byte 'spill_slot' will cause 3640 * the whole slot to be marked as 'read' 3641 */ 3642 mark_reg_read(env, &state->stack[spi].spilled_ptr, 3643 state->stack[spi].spilled_ptr.parent, 3644 REG_LIVE_READ64); 3645 } 3646 return update_stack_depth(env, state, min_off); 3647 } 3648 3649 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 3650 int access_size, bool zero_size_allowed, 3651 struct bpf_call_arg_meta *meta) 3652 { 3653 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3654 3655 switch (reg->type) { 3656 case PTR_TO_PACKET: 3657 case PTR_TO_PACKET_META: 3658 return check_packet_access(env, regno, reg->off, access_size, 3659 zero_size_allowed); 3660 case PTR_TO_MAP_VALUE: 3661 if (check_map_access_type(env, regno, reg->off, access_size, 3662 meta && meta->raw_mode ? BPF_WRITE : 3663 BPF_READ)) 3664 return -EACCES; 3665 return check_map_access(env, regno, reg->off, access_size, 3666 zero_size_allowed); 3667 case PTR_TO_MEM: 3668 return check_mem_region_access(env, regno, reg->off, 3669 access_size, reg->mem_size, 3670 zero_size_allowed); 3671 default: /* scalar_value|ptr_to_stack or invalid ptr */ 3672 return check_stack_boundary(env, regno, access_size, 3673 zero_size_allowed, meta); 3674 } 3675 } 3676 3677 /* Implementation details: 3678 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL 3679 * Two bpf_map_lookups (even with the same key) will have different reg->id. 3680 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after 3681 * value_or_null->value transition, since the verifier only cares about 3682 * the range of access to valid map value pointer and doesn't care about actual 3683 * address of the map element. 3684 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 3685 * reg->id > 0 after value_or_null->value transition. By doing so 3686 * two bpf_map_lookups will be considered two different pointers that 3687 * point to different bpf_spin_locks. 3688 * The verifier allows taking only one bpf_spin_lock at a time to avoid 3689 * dead-locks. 3690 * Since only one bpf_spin_lock is allowed the checks are simpler than 3691 * reg_is_refcounted() logic. The verifier needs to remember only 3692 * one spin_lock instead of array of acquired_refs. 3693 * cur_state->active_spin_lock remembers which map value element got locked 3694 * and clears it after bpf_spin_unlock. 3695 */ 3696 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 3697 bool is_lock) 3698 { 3699 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3700 struct bpf_verifier_state *cur = env->cur_state; 3701 bool is_const = tnum_is_const(reg->var_off); 3702 struct bpf_map *map = reg->map_ptr; 3703 u64 val = reg->var_off.value; 3704 3705 if (reg->type != PTR_TO_MAP_VALUE) { 3706 verbose(env, "R%d is not a pointer to map_value\n", regno); 3707 return -EINVAL; 3708 } 3709 if (!is_const) { 3710 verbose(env, 3711 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 3712 regno); 3713 return -EINVAL; 3714 } 3715 if (!map->btf) { 3716 verbose(env, 3717 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 3718 map->name); 3719 return -EINVAL; 3720 } 3721 if (!map_value_has_spin_lock(map)) { 3722 if (map->spin_lock_off == -E2BIG) 3723 verbose(env, 3724 "map '%s' has more than one 'struct bpf_spin_lock'\n", 3725 map->name); 3726 else if (map->spin_lock_off == -ENOENT) 3727 verbose(env, 3728 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 3729 map->name); 3730 else 3731 verbose(env, 3732 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 3733 map->name); 3734 return -EINVAL; 3735 } 3736 if (map->spin_lock_off != val + reg->off) { 3737 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 3738 val + reg->off); 3739 return -EINVAL; 3740 } 3741 if (is_lock) { 3742 if (cur->active_spin_lock) { 3743 verbose(env, 3744 "Locking two bpf_spin_locks are not allowed\n"); 3745 return -EINVAL; 3746 } 3747 cur->active_spin_lock = reg->id; 3748 } else { 3749 if (!cur->active_spin_lock) { 3750 verbose(env, "bpf_spin_unlock without taking a lock\n"); 3751 return -EINVAL; 3752 } 3753 if (cur->active_spin_lock != reg->id) { 3754 verbose(env, "bpf_spin_unlock of different lock\n"); 3755 return -EINVAL; 3756 } 3757 cur->active_spin_lock = 0; 3758 } 3759 return 0; 3760 } 3761 3762 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 3763 { 3764 return type == ARG_PTR_TO_MEM || 3765 type == ARG_PTR_TO_MEM_OR_NULL || 3766 type == ARG_PTR_TO_UNINIT_MEM; 3767 } 3768 3769 static bool arg_type_is_mem_size(enum bpf_arg_type type) 3770 { 3771 return type == ARG_CONST_SIZE || 3772 type == ARG_CONST_SIZE_OR_ZERO; 3773 } 3774 3775 static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type) 3776 { 3777 return type == ARG_PTR_TO_ALLOC_MEM || 3778 type == ARG_PTR_TO_ALLOC_MEM_OR_NULL; 3779 } 3780 3781 static bool arg_type_is_alloc_size(enum bpf_arg_type type) 3782 { 3783 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; 3784 } 3785 3786 static bool arg_type_is_int_ptr(enum bpf_arg_type type) 3787 { 3788 return type == ARG_PTR_TO_INT || 3789 type == ARG_PTR_TO_LONG; 3790 } 3791 3792 static int int_ptr_type_to_size(enum bpf_arg_type type) 3793 { 3794 if (type == ARG_PTR_TO_INT) 3795 return sizeof(u32); 3796 else if (type == ARG_PTR_TO_LONG) 3797 return sizeof(u64); 3798 3799 return -EINVAL; 3800 } 3801 3802 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, 3803 struct bpf_call_arg_meta *meta, 3804 const struct bpf_func_proto *fn) 3805 { 3806 u32 regno = BPF_REG_1 + arg; 3807 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3808 enum bpf_reg_type expected_type, type = reg->type; 3809 enum bpf_arg_type arg_type = fn->arg_type[arg]; 3810 int err = 0; 3811 3812 if (arg_type == ARG_DONTCARE) 3813 return 0; 3814 3815 err = check_reg_arg(env, regno, SRC_OP); 3816 if (err) 3817 return err; 3818 3819 if (arg_type == ARG_ANYTHING) { 3820 if (is_pointer_value(env, regno)) { 3821 verbose(env, "R%d leaks addr into helper function\n", 3822 regno); 3823 return -EACCES; 3824 } 3825 return 0; 3826 } 3827 3828 if (type_is_pkt_pointer(type) && 3829 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 3830 verbose(env, "helper access to the packet is not allowed\n"); 3831 return -EACCES; 3832 } 3833 3834 if (arg_type == ARG_PTR_TO_MAP_KEY || 3835 arg_type == ARG_PTR_TO_MAP_VALUE || 3836 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || 3837 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { 3838 expected_type = PTR_TO_STACK; 3839 if (register_is_null(reg) && 3840 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) 3841 /* final test in check_stack_boundary() */; 3842 else if (!type_is_pkt_pointer(type) && 3843 type != PTR_TO_MAP_VALUE && 3844 type != expected_type) 3845 goto err_type; 3846 } else if (arg_type == ARG_CONST_SIZE || 3847 arg_type == ARG_CONST_SIZE_OR_ZERO || 3848 arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) { 3849 expected_type = SCALAR_VALUE; 3850 if (type != expected_type) 3851 goto err_type; 3852 } else if (arg_type == ARG_CONST_MAP_PTR) { 3853 expected_type = CONST_PTR_TO_MAP; 3854 if (type != expected_type) 3855 goto err_type; 3856 } else if (arg_type == ARG_PTR_TO_CTX || 3857 arg_type == ARG_PTR_TO_CTX_OR_NULL) { 3858 expected_type = PTR_TO_CTX; 3859 if (!(register_is_null(reg) && 3860 arg_type == ARG_PTR_TO_CTX_OR_NULL)) { 3861 if (type != expected_type) 3862 goto err_type; 3863 err = check_ctx_reg(env, reg, regno); 3864 if (err < 0) 3865 return err; 3866 } 3867 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { 3868 expected_type = PTR_TO_SOCK_COMMON; 3869 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ 3870 if (!type_is_sk_pointer(type)) 3871 goto err_type; 3872 if (reg->ref_obj_id) { 3873 if (meta->ref_obj_id) { 3874 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 3875 regno, reg->ref_obj_id, 3876 meta->ref_obj_id); 3877 return -EFAULT; 3878 } 3879 meta->ref_obj_id = reg->ref_obj_id; 3880 } 3881 } else if (arg_type == ARG_PTR_TO_SOCKET || 3882 arg_type == ARG_PTR_TO_SOCKET_OR_NULL) { 3883 expected_type = PTR_TO_SOCKET; 3884 if (!(register_is_null(reg) && 3885 arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) { 3886 if (type != expected_type) 3887 goto err_type; 3888 } 3889 } else if (arg_type == ARG_PTR_TO_BTF_ID) { 3890 expected_type = PTR_TO_BTF_ID; 3891 if (type != expected_type) 3892 goto err_type; 3893 if (!fn->check_btf_id) { 3894 if (reg->btf_id != meta->btf_id) { 3895 verbose(env, "Helper has type %s got %s in R%d\n", 3896 kernel_type_name(meta->btf_id), 3897 kernel_type_name(reg->btf_id), regno); 3898 3899 return -EACCES; 3900 } 3901 } else if (!fn->check_btf_id(reg->btf_id, arg)) { 3902 verbose(env, "Helper does not support %s in R%d\n", 3903 kernel_type_name(reg->btf_id), regno); 3904 3905 return -EACCES; 3906 } 3907 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) { 3908 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", 3909 regno); 3910 return -EACCES; 3911 } 3912 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 3913 if (meta->func_id == BPF_FUNC_spin_lock) { 3914 if (process_spin_lock(env, regno, true)) 3915 return -EACCES; 3916 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 3917 if (process_spin_lock(env, regno, false)) 3918 return -EACCES; 3919 } else { 3920 verbose(env, "verifier internal error\n"); 3921 return -EFAULT; 3922 } 3923 } else if (arg_type_is_mem_ptr(arg_type)) { 3924 expected_type = PTR_TO_STACK; 3925 /* One exception here. In case function allows for NULL to be 3926 * passed in as argument, it's a SCALAR_VALUE type. Final test 3927 * happens during stack boundary checking. 3928 */ 3929 if (register_is_null(reg) && 3930 (arg_type == ARG_PTR_TO_MEM_OR_NULL || 3931 arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)) 3932 /* final test in check_stack_boundary() */; 3933 else if (!type_is_pkt_pointer(type) && 3934 type != PTR_TO_MAP_VALUE && 3935 type != PTR_TO_MEM && 3936 type != expected_type) 3937 goto err_type; 3938 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 3939 } else if (arg_type_is_alloc_mem_ptr(arg_type)) { 3940 expected_type = PTR_TO_MEM; 3941 if (register_is_null(reg) && 3942 arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL) 3943 /* final test in check_stack_boundary() */; 3944 else if (type != expected_type) 3945 goto err_type; 3946 if (meta->ref_obj_id) { 3947 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 3948 regno, reg->ref_obj_id, 3949 meta->ref_obj_id); 3950 return -EFAULT; 3951 } 3952 meta->ref_obj_id = reg->ref_obj_id; 3953 } else if (arg_type_is_int_ptr(arg_type)) { 3954 expected_type = PTR_TO_STACK; 3955 if (!type_is_pkt_pointer(type) && 3956 type != PTR_TO_MAP_VALUE && 3957 type != expected_type) 3958 goto err_type; 3959 } else { 3960 verbose(env, "unsupported arg_type %d\n", arg_type); 3961 return -EFAULT; 3962 } 3963 3964 if (arg_type == ARG_CONST_MAP_PTR) { 3965 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 3966 meta->map_ptr = reg->map_ptr; 3967 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 3968 /* bpf_map_xxx(..., map_ptr, ..., key) call: 3969 * check that [key, key + map->key_size) are within 3970 * stack limits and initialized 3971 */ 3972 if (!meta->map_ptr) { 3973 /* in function declaration map_ptr must come before 3974 * map_key, so that it's verified and known before 3975 * we have to check map_key here. Otherwise it means 3976 * that kernel subsystem misconfigured verifier 3977 */ 3978 verbose(env, "invalid map_ptr to access map->key\n"); 3979 return -EACCES; 3980 } 3981 err = check_helper_mem_access(env, regno, 3982 meta->map_ptr->key_size, false, 3983 NULL); 3984 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 3985 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 3986 !register_is_null(reg)) || 3987 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 3988 /* bpf_map_xxx(..., map_ptr, ..., value) call: 3989 * check [value, value + map->value_size) validity 3990 */ 3991 if (!meta->map_ptr) { 3992 /* kernel subsystem misconfigured verifier */ 3993 verbose(env, "invalid map_ptr to access map->value\n"); 3994 return -EACCES; 3995 } 3996 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 3997 err = check_helper_mem_access(env, regno, 3998 meta->map_ptr->value_size, false, 3999 meta); 4000 } else if (arg_type_is_mem_size(arg_type)) { 4001 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 4002 4003 /* This is used to refine r0 return value bounds for helpers 4004 * that enforce this value as an upper bound on return values. 4005 * See do_refine_retval_range() for helpers that can refine 4006 * the return value. C type of helper is u32 so we pull register 4007 * bound from umax_value however, if negative verifier errors 4008 * out. Only upper bounds can be learned because retval is an 4009 * int type and negative retvals are allowed. 4010 */ 4011 meta->msize_max_value = reg->umax_value; 4012 4013 /* The register is SCALAR_VALUE; the access check 4014 * happens using its boundaries. 4015 */ 4016 if (!tnum_is_const(reg->var_off)) 4017 /* For unprivileged variable accesses, disable raw 4018 * mode so that the program is required to 4019 * initialize all the memory that the helper could 4020 * just partially fill up. 4021 */ 4022 meta = NULL; 4023 4024 if (reg->smin_value < 0) { 4025 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 4026 regno); 4027 return -EACCES; 4028 } 4029 4030 if (reg->umin_value == 0) { 4031 err = check_helper_mem_access(env, regno - 1, 0, 4032 zero_size_allowed, 4033 meta); 4034 if (err) 4035 return err; 4036 } 4037 4038 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 4039 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 4040 regno); 4041 return -EACCES; 4042 } 4043 err = check_helper_mem_access(env, regno - 1, 4044 reg->umax_value, 4045 zero_size_allowed, meta); 4046 if (!err) 4047 err = mark_chain_precision(env, regno); 4048 } else if (arg_type_is_alloc_size(arg_type)) { 4049 if (!tnum_is_const(reg->var_off)) { 4050 verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n", 4051 regno); 4052 return -EACCES; 4053 } 4054 meta->mem_size = reg->var_off.value; 4055 } else if (arg_type_is_int_ptr(arg_type)) { 4056 int size = int_ptr_type_to_size(arg_type); 4057 4058 err = check_helper_mem_access(env, regno, size, false, meta); 4059 if (err) 4060 return err; 4061 err = check_ptr_alignment(env, reg, 0, size, true); 4062 } 4063 4064 return err; 4065 err_type: 4066 verbose(env, "R%d type=%s expected=%s\n", regno, 4067 reg_type_str[type], reg_type_str[expected_type]); 4068 return -EACCES; 4069 } 4070 4071 static int check_map_func_compatibility(struct bpf_verifier_env *env, 4072 struct bpf_map *map, int func_id) 4073 { 4074 if (!map) 4075 return 0; 4076 4077 /* We need a two way check, first is from map perspective ... */ 4078 switch (map->map_type) { 4079 case BPF_MAP_TYPE_PROG_ARRAY: 4080 if (func_id != BPF_FUNC_tail_call) 4081 goto error; 4082 break; 4083 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 4084 if (func_id != BPF_FUNC_perf_event_read && 4085 func_id != BPF_FUNC_perf_event_output && 4086 func_id != BPF_FUNC_skb_output && 4087 func_id != BPF_FUNC_perf_event_read_value && 4088 func_id != BPF_FUNC_xdp_output) 4089 goto error; 4090 break; 4091 case BPF_MAP_TYPE_RINGBUF: 4092 if (func_id != BPF_FUNC_ringbuf_output && 4093 func_id != BPF_FUNC_ringbuf_reserve && 4094 func_id != BPF_FUNC_ringbuf_submit && 4095 func_id != BPF_FUNC_ringbuf_discard && 4096 func_id != BPF_FUNC_ringbuf_query) 4097 goto error; 4098 break; 4099 case BPF_MAP_TYPE_STACK_TRACE: 4100 if (func_id != BPF_FUNC_get_stackid) 4101 goto error; 4102 break; 4103 case BPF_MAP_TYPE_CGROUP_ARRAY: 4104 if (func_id != BPF_FUNC_skb_under_cgroup && 4105 func_id != BPF_FUNC_current_task_under_cgroup) 4106 goto error; 4107 break; 4108 case BPF_MAP_TYPE_CGROUP_STORAGE: 4109 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 4110 if (func_id != BPF_FUNC_get_local_storage) 4111 goto error; 4112 break; 4113 case BPF_MAP_TYPE_DEVMAP: 4114 case BPF_MAP_TYPE_DEVMAP_HASH: 4115 if (func_id != BPF_FUNC_redirect_map && 4116 func_id != BPF_FUNC_map_lookup_elem) 4117 goto error; 4118 break; 4119 /* Restrict bpf side of cpumap and xskmap, open when use-cases 4120 * appear. 4121 */ 4122 case BPF_MAP_TYPE_CPUMAP: 4123 if (func_id != BPF_FUNC_redirect_map) 4124 goto error; 4125 break; 4126 case BPF_MAP_TYPE_XSKMAP: 4127 if (func_id != BPF_FUNC_redirect_map && 4128 func_id != BPF_FUNC_map_lookup_elem) 4129 goto error; 4130 break; 4131 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 4132 case BPF_MAP_TYPE_HASH_OF_MAPS: 4133 if (func_id != BPF_FUNC_map_lookup_elem) 4134 goto error; 4135 break; 4136 case BPF_MAP_TYPE_SOCKMAP: 4137 if (func_id != BPF_FUNC_sk_redirect_map && 4138 func_id != BPF_FUNC_sock_map_update && 4139 func_id != BPF_FUNC_map_delete_elem && 4140 func_id != BPF_FUNC_msg_redirect_map && 4141 func_id != BPF_FUNC_sk_select_reuseport && 4142 func_id != BPF_FUNC_map_lookup_elem) 4143 goto error; 4144 break; 4145 case BPF_MAP_TYPE_SOCKHASH: 4146 if (func_id != BPF_FUNC_sk_redirect_hash && 4147 func_id != BPF_FUNC_sock_hash_update && 4148 func_id != BPF_FUNC_map_delete_elem && 4149 func_id != BPF_FUNC_msg_redirect_hash && 4150 func_id != BPF_FUNC_sk_select_reuseport && 4151 func_id != BPF_FUNC_map_lookup_elem) 4152 goto error; 4153 break; 4154 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 4155 if (func_id != BPF_FUNC_sk_select_reuseport) 4156 goto error; 4157 break; 4158 case BPF_MAP_TYPE_QUEUE: 4159 case BPF_MAP_TYPE_STACK: 4160 if (func_id != BPF_FUNC_map_peek_elem && 4161 func_id != BPF_FUNC_map_pop_elem && 4162 func_id != BPF_FUNC_map_push_elem) 4163 goto error; 4164 break; 4165 case BPF_MAP_TYPE_SK_STORAGE: 4166 if (func_id != BPF_FUNC_sk_storage_get && 4167 func_id != BPF_FUNC_sk_storage_delete) 4168 goto error; 4169 break; 4170 default: 4171 break; 4172 } 4173 4174 /* ... and second from the function itself. */ 4175 switch (func_id) { 4176 case BPF_FUNC_tail_call: 4177 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 4178 goto error; 4179 if (env->subprog_cnt > 1) { 4180 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); 4181 return -EINVAL; 4182 } 4183 break; 4184 case BPF_FUNC_perf_event_read: 4185 case BPF_FUNC_perf_event_output: 4186 case BPF_FUNC_perf_event_read_value: 4187 case BPF_FUNC_skb_output: 4188 case BPF_FUNC_xdp_output: 4189 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 4190 goto error; 4191 break; 4192 case BPF_FUNC_get_stackid: 4193 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 4194 goto error; 4195 break; 4196 case BPF_FUNC_current_task_under_cgroup: 4197 case BPF_FUNC_skb_under_cgroup: 4198 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 4199 goto error; 4200 break; 4201 case BPF_FUNC_redirect_map: 4202 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 4203 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 4204 map->map_type != BPF_MAP_TYPE_CPUMAP && 4205 map->map_type != BPF_MAP_TYPE_XSKMAP) 4206 goto error; 4207 break; 4208 case BPF_FUNC_sk_redirect_map: 4209 case BPF_FUNC_msg_redirect_map: 4210 case BPF_FUNC_sock_map_update: 4211 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 4212 goto error; 4213 break; 4214 case BPF_FUNC_sk_redirect_hash: 4215 case BPF_FUNC_msg_redirect_hash: 4216 case BPF_FUNC_sock_hash_update: 4217 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 4218 goto error; 4219 break; 4220 case BPF_FUNC_get_local_storage: 4221 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 4222 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 4223 goto error; 4224 break; 4225 case BPF_FUNC_sk_select_reuseport: 4226 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 4227 map->map_type != BPF_MAP_TYPE_SOCKMAP && 4228 map->map_type != BPF_MAP_TYPE_SOCKHASH) 4229 goto error; 4230 break; 4231 case BPF_FUNC_map_peek_elem: 4232 case BPF_FUNC_map_pop_elem: 4233 case BPF_FUNC_map_push_elem: 4234 if (map->map_type != BPF_MAP_TYPE_QUEUE && 4235 map->map_type != BPF_MAP_TYPE_STACK) 4236 goto error; 4237 break; 4238 case BPF_FUNC_sk_storage_get: 4239 case BPF_FUNC_sk_storage_delete: 4240 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 4241 goto error; 4242 break; 4243 default: 4244 break; 4245 } 4246 4247 return 0; 4248 error: 4249 verbose(env, "cannot pass map_type %d into func %s#%d\n", 4250 map->map_type, func_id_name(func_id), func_id); 4251 return -EINVAL; 4252 } 4253 4254 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 4255 { 4256 int count = 0; 4257 4258 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 4259 count++; 4260 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 4261 count++; 4262 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 4263 count++; 4264 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 4265 count++; 4266 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 4267 count++; 4268 4269 /* We only support one arg being in raw mode at the moment, 4270 * which is sufficient for the helper functions we have 4271 * right now. 4272 */ 4273 return count <= 1; 4274 } 4275 4276 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 4277 enum bpf_arg_type arg_next) 4278 { 4279 return (arg_type_is_mem_ptr(arg_curr) && 4280 !arg_type_is_mem_size(arg_next)) || 4281 (!arg_type_is_mem_ptr(arg_curr) && 4282 arg_type_is_mem_size(arg_next)); 4283 } 4284 4285 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 4286 { 4287 /* bpf_xxx(..., buf, len) call will access 'len' 4288 * bytes from memory 'buf'. Both arg types need 4289 * to be paired, so make sure there's no buggy 4290 * helper function specification. 4291 */ 4292 if (arg_type_is_mem_size(fn->arg1_type) || 4293 arg_type_is_mem_ptr(fn->arg5_type) || 4294 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 4295 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 4296 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 4297 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 4298 return false; 4299 4300 return true; 4301 } 4302 4303 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) 4304 { 4305 int count = 0; 4306 4307 if (arg_type_may_be_refcounted(fn->arg1_type)) 4308 count++; 4309 if (arg_type_may_be_refcounted(fn->arg2_type)) 4310 count++; 4311 if (arg_type_may_be_refcounted(fn->arg3_type)) 4312 count++; 4313 if (arg_type_may_be_refcounted(fn->arg4_type)) 4314 count++; 4315 if (arg_type_may_be_refcounted(fn->arg5_type)) 4316 count++; 4317 4318 /* A reference acquiring function cannot acquire 4319 * another refcounted ptr. 4320 */ 4321 if (may_be_acquire_function(func_id) && count) 4322 return false; 4323 4324 /* We only support one arg being unreferenced at the moment, 4325 * which is sufficient for the helper functions we have right now. 4326 */ 4327 return count <= 1; 4328 } 4329 4330 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 4331 { 4332 return check_raw_mode_ok(fn) && 4333 check_arg_pair_ok(fn) && 4334 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; 4335 } 4336 4337 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 4338 * are now invalid, so turn them into unknown SCALAR_VALUE. 4339 */ 4340 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 4341 struct bpf_func_state *state) 4342 { 4343 struct bpf_reg_state *regs = state->regs, *reg; 4344 int i; 4345 4346 for (i = 0; i < MAX_BPF_REG; i++) 4347 if (reg_is_pkt_pointer_any(®s[i])) 4348 mark_reg_unknown(env, regs, i); 4349 4350 bpf_for_each_spilled_reg(i, state, reg) { 4351 if (!reg) 4352 continue; 4353 if (reg_is_pkt_pointer_any(reg)) 4354 __mark_reg_unknown(env, reg); 4355 } 4356 } 4357 4358 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 4359 { 4360 struct bpf_verifier_state *vstate = env->cur_state; 4361 int i; 4362 4363 for (i = 0; i <= vstate->curframe; i++) 4364 __clear_all_pkt_pointers(env, vstate->frame[i]); 4365 } 4366 4367 static void release_reg_references(struct bpf_verifier_env *env, 4368 struct bpf_func_state *state, 4369 int ref_obj_id) 4370 { 4371 struct bpf_reg_state *regs = state->regs, *reg; 4372 int i; 4373 4374 for (i = 0; i < MAX_BPF_REG; i++) 4375 if (regs[i].ref_obj_id == ref_obj_id) 4376 mark_reg_unknown(env, regs, i); 4377 4378 bpf_for_each_spilled_reg(i, state, reg) { 4379 if (!reg) 4380 continue; 4381 if (reg->ref_obj_id == ref_obj_id) 4382 __mark_reg_unknown(env, reg); 4383 } 4384 } 4385 4386 /* The pointer with the specified id has released its reference to kernel 4387 * resources. Identify all copies of the same pointer and clear the reference. 4388 */ 4389 static int release_reference(struct bpf_verifier_env *env, 4390 int ref_obj_id) 4391 { 4392 struct bpf_verifier_state *vstate = env->cur_state; 4393 int err; 4394 int i; 4395 4396 err = release_reference_state(cur_func(env), ref_obj_id); 4397 if (err) 4398 return err; 4399 4400 for (i = 0; i <= vstate->curframe; i++) 4401 release_reg_references(env, vstate->frame[i], ref_obj_id); 4402 4403 return 0; 4404 } 4405 4406 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 4407 struct bpf_reg_state *regs) 4408 { 4409 int i; 4410 4411 /* after the call registers r0 - r5 were scratched */ 4412 for (i = 0; i < CALLER_SAVED_REGS; i++) { 4413 mark_reg_not_init(env, regs, caller_saved[i]); 4414 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 4415 } 4416 } 4417 4418 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 4419 int *insn_idx) 4420 { 4421 struct bpf_verifier_state *state = env->cur_state; 4422 struct bpf_func_info_aux *func_info_aux; 4423 struct bpf_func_state *caller, *callee; 4424 int i, err, subprog, target_insn; 4425 bool is_global = false; 4426 4427 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 4428 verbose(env, "the call stack of %d frames is too deep\n", 4429 state->curframe + 2); 4430 return -E2BIG; 4431 } 4432 4433 target_insn = *insn_idx + insn->imm; 4434 subprog = find_subprog(env, target_insn + 1); 4435 if (subprog < 0) { 4436 verbose(env, "verifier bug. No program starts at insn %d\n", 4437 target_insn + 1); 4438 return -EFAULT; 4439 } 4440 4441 caller = state->frame[state->curframe]; 4442 if (state->frame[state->curframe + 1]) { 4443 verbose(env, "verifier bug. Frame %d already allocated\n", 4444 state->curframe + 1); 4445 return -EFAULT; 4446 } 4447 4448 func_info_aux = env->prog->aux->func_info_aux; 4449 if (func_info_aux) 4450 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 4451 err = btf_check_func_arg_match(env, subprog, caller->regs); 4452 if (err == -EFAULT) 4453 return err; 4454 if (is_global) { 4455 if (err) { 4456 verbose(env, "Caller passes invalid args into func#%d\n", 4457 subprog); 4458 return err; 4459 } else { 4460 if (env->log.level & BPF_LOG_LEVEL) 4461 verbose(env, 4462 "Func#%d is global and valid. Skipping.\n", 4463 subprog); 4464 clear_caller_saved_regs(env, caller->regs); 4465 4466 /* All global functions return SCALAR_VALUE */ 4467 mark_reg_unknown(env, caller->regs, BPF_REG_0); 4468 4469 /* continue with next insn after call */ 4470 return 0; 4471 } 4472 } 4473 4474 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 4475 if (!callee) 4476 return -ENOMEM; 4477 state->frame[state->curframe + 1] = callee; 4478 4479 /* callee cannot access r0, r6 - r9 for reading and has to write 4480 * into its own stack before reading from it. 4481 * callee can read/write into caller's stack 4482 */ 4483 init_func_state(env, callee, 4484 /* remember the callsite, it will be used by bpf_exit */ 4485 *insn_idx /* callsite */, 4486 state->curframe + 1 /* frameno within this callchain */, 4487 subprog /* subprog number within this prog */); 4488 4489 /* Transfer references to the callee */ 4490 err = transfer_reference_state(callee, caller); 4491 if (err) 4492 return err; 4493 4494 /* copy r1 - r5 args that callee can access. The copy includes parent 4495 * pointers, which connects us up to the liveness chain 4496 */ 4497 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 4498 callee->regs[i] = caller->regs[i]; 4499 4500 clear_caller_saved_regs(env, caller->regs); 4501 4502 /* only increment it after check_reg_arg() finished */ 4503 state->curframe++; 4504 4505 /* and go analyze first insn of the callee */ 4506 *insn_idx = target_insn; 4507 4508 if (env->log.level & BPF_LOG_LEVEL) { 4509 verbose(env, "caller:\n"); 4510 print_verifier_state(env, caller); 4511 verbose(env, "callee:\n"); 4512 print_verifier_state(env, callee); 4513 } 4514 return 0; 4515 } 4516 4517 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 4518 { 4519 struct bpf_verifier_state *state = env->cur_state; 4520 struct bpf_func_state *caller, *callee; 4521 struct bpf_reg_state *r0; 4522 int err; 4523 4524 callee = state->frame[state->curframe]; 4525 r0 = &callee->regs[BPF_REG_0]; 4526 if (r0->type == PTR_TO_STACK) { 4527 /* technically it's ok to return caller's stack pointer 4528 * (or caller's caller's pointer) back to the caller, 4529 * since these pointers are valid. Only current stack 4530 * pointer will be invalid as soon as function exits, 4531 * but let's be conservative 4532 */ 4533 verbose(env, "cannot return stack pointer to the caller\n"); 4534 return -EINVAL; 4535 } 4536 4537 state->curframe--; 4538 caller = state->frame[state->curframe]; 4539 /* return to the caller whatever r0 had in the callee */ 4540 caller->regs[BPF_REG_0] = *r0; 4541 4542 /* Transfer references to the caller */ 4543 err = transfer_reference_state(caller, callee); 4544 if (err) 4545 return err; 4546 4547 *insn_idx = callee->callsite + 1; 4548 if (env->log.level & BPF_LOG_LEVEL) { 4549 verbose(env, "returning from callee:\n"); 4550 print_verifier_state(env, callee); 4551 verbose(env, "to caller at %d:\n", *insn_idx); 4552 print_verifier_state(env, caller); 4553 } 4554 /* clear everything in the callee */ 4555 free_func_state(callee); 4556 state->frame[state->curframe + 1] = NULL; 4557 return 0; 4558 } 4559 4560 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, 4561 int func_id, 4562 struct bpf_call_arg_meta *meta) 4563 { 4564 struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; 4565 4566 if (ret_type != RET_INTEGER || 4567 (func_id != BPF_FUNC_get_stack && 4568 func_id != BPF_FUNC_probe_read_str && 4569 func_id != BPF_FUNC_probe_read_kernel_str && 4570 func_id != BPF_FUNC_probe_read_user_str)) 4571 return; 4572 4573 ret_reg->smax_value = meta->msize_max_value; 4574 ret_reg->s32_max_value = meta->msize_max_value; 4575 __reg_deduce_bounds(ret_reg); 4576 __reg_bound_offset(ret_reg); 4577 __update_reg_bounds(ret_reg); 4578 } 4579 4580 static int 4581 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 4582 int func_id, int insn_idx) 4583 { 4584 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 4585 struct bpf_map *map = meta->map_ptr; 4586 4587 if (func_id != BPF_FUNC_tail_call && 4588 func_id != BPF_FUNC_map_lookup_elem && 4589 func_id != BPF_FUNC_map_update_elem && 4590 func_id != BPF_FUNC_map_delete_elem && 4591 func_id != BPF_FUNC_map_push_elem && 4592 func_id != BPF_FUNC_map_pop_elem && 4593 func_id != BPF_FUNC_map_peek_elem) 4594 return 0; 4595 4596 if (map == NULL) { 4597 verbose(env, "kernel subsystem misconfigured verifier\n"); 4598 return -EINVAL; 4599 } 4600 4601 /* In case of read-only, some additional restrictions 4602 * need to be applied in order to prevent altering the 4603 * state of the map from program side. 4604 */ 4605 if ((map->map_flags & BPF_F_RDONLY_PROG) && 4606 (func_id == BPF_FUNC_map_delete_elem || 4607 func_id == BPF_FUNC_map_update_elem || 4608 func_id == BPF_FUNC_map_push_elem || 4609 func_id == BPF_FUNC_map_pop_elem)) { 4610 verbose(env, "write into map forbidden\n"); 4611 return -EACCES; 4612 } 4613 4614 if (!BPF_MAP_PTR(aux->map_ptr_state)) 4615 bpf_map_ptr_store(aux, meta->map_ptr, 4616 !meta->map_ptr->bypass_spec_v1); 4617 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) 4618 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, 4619 !meta->map_ptr->bypass_spec_v1); 4620 return 0; 4621 } 4622 4623 static int 4624 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 4625 int func_id, int insn_idx) 4626 { 4627 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 4628 struct bpf_reg_state *regs = cur_regs(env), *reg; 4629 struct bpf_map *map = meta->map_ptr; 4630 struct tnum range; 4631 u64 val; 4632 int err; 4633 4634 if (func_id != BPF_FUNC_tail_call) 4635 return 0; 4636 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { 4637 verbose(env, "kernel subsystem misconfigured verifier\n"); 4638 return -EINVAL; 4639 } 4640 4641 range = tnum_range(0, map->max_entries - 1); 4642 reg = ®s[BPF_REG_3]; 4643 4644 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { 4645 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 4646 return 0; 4647 } 4648 4649 err = mark_chain_precision(env, BPF_REG_3); 4650 if (err) 4651 return err; 4652 4653 val = reg->var_off.value; 4654 if (bpf_map_key_unseen(aux)) 4655 bpf_map_key_store(aux, val); 4656 else if (!bpf_map_key_poisoned(aux) && 4657 bpf_map_key_immediate(aux) != val) 4658 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 4659 return 0; 4660 } 4661 4662 static int check_reference_leak(struct bpf_verifier_env *env) 4663 { 4664 struct bpf_func_state *state = cur_func(env); 4665 int i; 4666 4667 for (i = 0; i < state->acquired_refs; i++) { 4668 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", 4669 state->refs[i].id, state->refs[i].insn_idx); 4670 } 4671 return state->acquired_refs ? -EINVAL : 0; 4672 } 4673 4674 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 4675 { 4676 const struct bpf_func_proto *fn = NULL; 4677 struct bpf_reg_state *regs; 4678 struct bpf_call_arg_meta meta; 4679 bool changes_data; 4680 int i, err; 4681 4682 /* find function prototype */ 4683 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 4684 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 4685 func_id); 4686 return -EINVAL; 4687 } 4688 4689 if (env->ops->get_func_proto) 4690 fn = env->ops->get_func_proto(func_id, env->prog); 4691 if (!fn) { 4692 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 4693 func_id); 4694 return -EINVAL; 4695 } 4696 4697 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 4698 if (!env->prog->gpl_compatible && fn->gpl_only) { 4699 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 4700 return -EINVAL; 4701 } 4702 4703 /* With LD_ABS/IND some JITs save/restore skb from r1. */ 4704 changes_data = bpf_helper_changes_pkt_data(fn->func); 4705 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 4706 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 4707 func_id_name(func_id), func_id); 4708 return -EINVAL; 4709 } 4710 4711 memset(&meta, 0, sizeof(meta)); 4712 meta.pkt_access = fn->pkt_access; 4713 4714 err = check_func_proto(fn, func_id); 4715 if (err) { 4716 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 4717 func_id_name(func_id), func_id); 4718 return err; 4719 } 4720 4721 meta.func_id = func_id; 4722 /* check args */ 4723 for (i = 0; i < 5; i++) { 4724 if (!fn->check_btf_id) { 4725 err = btf_resolve_helper_id(&env->log, fn, i); 4726 if (err > 0) 4727 meta.btf_id = err; 4728 } 4729 err = check_func_arg(env, i, &meta, fn); 4730 if (err) 4731 return err; 4732 } 4733 4734 err = record_func_map(env, &meta, func_id, insn_idx); 4735 if (err) 4736 return err; 4737 4738 err = record_func_key(env, &meta, func_id, insn_idx); 4739 if (err) 4740 return err; 4741 4742 /* Mark slots with STACK_MISC in case of raw mode, stack offset 4743 * is inferred from register state. 4744 */ 4745 for (i = 0; i < meta.access_size; i++) { 4746 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 4747 BPF_WRITE, -1, false); 4748 if (err) 4749 return err; 4750 } 4751 4752 if (func_id == BPF_FUNC_tail_call) { 4753 err = check_reference_leak(env); 4754 if (err) { 4755 verbose(env, "tail_call would lead to reference leak\n"); 4756 return err; 4757 } 4758 } else if (is_release_function(func_id)) { 4759 err = release_reference(env, meta.ref_obj_id); 4760 if (err) { 4761 verbose(env, "func %s#%d reference has not been acquired before\n", 4762 func_id_name(func_id), func_id); 4763 return err; 4764 } 4765 } 4766 4767 regs = cur_regs(env); 4768 4769 /* check that flags argument in get_local_storage(map, flags) is 0, 4770 * this is required because get_local_storage() can't return an error. 4771 */ 4772 if (func_id == BPF_FUNC_get_local_storage && 4773 !register_is_null(®s[BPF_REG_2])) { 4774 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); 4775 return -EINVAL; 4776 } 4777 4778 /* reset caller saved regs */ 4779 for (i = 0; i < CALLER_SAVED_REGS; i++) { 4780 mark_reg_not_init(env, regs, caller_saved[i]); 4781 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 4782 } 4783 4784 /* helper call returns 64-bit value. */ 4785 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 4786 4787 /* update return register (already marked as written above) */ 4788 if (fn->ret_type == RET_INTEGER) { 4789 /* sets type to SCALAR_VALUE */ 4790 mark_reg_unknown(env, regs, BPF_REG_0); 4791 } else if (fn->ret_type == RET_VOID) { 4792 regs[BPF_REG_0].type = NOT_INIT; 4793 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || 4794 fn->ret_type == RET_PTR_TO_MAP_VALUE) { 4795 /* There is no offset yet applied, variable or fixed */ 4796 mark_reg_known_zero(env, regs, BPF_REG_0); 4797 /* remember map_ptr, so that check_map_access() 4798 * can check 'value_size' boundary of memory access 4799 * to map element returned from bpf_map_lookup_elem() 4800 */ 4801 if (meta.map_ptr == NULL) { 4802 verbose(env, 4803 "kernel subsystem misconfigured verifier\n"); 4804 return -EINVAL; 4805 } 4806 regs[BPF_REG_0].map_ptr = meta.map_ptr; 4807 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { 4808 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; 4809 if (map_value_has_spin_lock(meta.map_ptr)) 4810 regs[BPF_REG_0].id = ++env->id_gen; 4811 } else { 4812 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 4813 regs[BPF_REG_0].id = ++env->id_gen; 4814 } 4815 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { 4816 mark_reg_known_zero(env, regs, BPF_REG_0); 4817 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; 4818 regs[BPF_REG_0].id = ++env->id_gen; 4819 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { 4820 mark_reg_known_zero(env, regs, BPF_REG_0); 4821 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; 4822 regs[BPF_REG_0].id = ++env->id_gen; 4823 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { 4824 mark_reg_known_zero(env, regs, BPF_REG_0); 4825 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; 4826 regs[BPF_REG_0].id = ++env->id_gen; 4827 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { 4828 mark_reg_known_zero(env, regs, BPF_REG_0); 4829 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; 4830 regs[BPF_REG_0].id = ++env->id_gen; 4831 regs[BPF_REG_0].mem_size = meta.mem_size; 4832 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { 4833 int ret_btf_id; 4834 4835 mark_reg_known_zero(env, regs, BPF_REG_0); 4836 regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; 4837 ret_btf_id = *fn->ret_btf_id; 4838 if (ret_btf_id == 0) { 4839 verbose(env, "invalid return type %d of func %s#%d\n", 4840 fn->ret_type, func_id_name(func_id), func_id); 4841 return -EINVAL; 4842 } 4843 regs[BPF_REG_0].btf_id = ret_btf_id; 4844 } else { 4845 verbose(env, "unknown return type %d of func %s#%d\n", 4846 fn->ret_type, func_id_name(func_id), func_id); 4847 return -EINVAL; 4848 } 4849 4850 if (is_ptr_cast_function(func_id)) { 4851 /* For release_reference() */ 4852 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 4853 } else if (is_acquire_function(func_id, meta.map_ptr)) { 4854 int id = acquire_reference_state(env, insn_idx); 4855 4856 if (id < 0) 4857 return id; 4858 /* For mark_ptr_or_null_reg() */ 4859 regs[BPF_REG_0].id = id; 4860 /* For release_reference() */ 4861 regs[BPF_REG_0].ref_obj_id = id; 4862 } 4863 4864 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 4865 4866 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 4867 if (err) 4868 return err; 4869 4870 if ((func_id == BPF_FUNC_get_stack || 4871 func_id == BPF_FUNC_get_task_stack) && 4872 !env->prog->has_callchain_buf) { 4873 const char *err_str; 4874 4875 #ifdef CONFIG_PERF_EVENTS 4876 err = get_callchain_buffers(sysctl_perf_event_max_stack); 4877 err_str = "cannot get callchain buffer for func %s#%d\n"; 4878 #else 4879 err = -ENOTSUPP; 4880 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 4881 #endif 4882 if (err) { 4883 verbose(env, err_str, func_id_name(func_id), func_id); 4884 return err; 4885 } 4886 4887 env->prog->has_callchain_buf = true; 4888 } 4889 4890 if (changes_data) 4891 clear_all_pkt_pointers(env); 4892 return 0; 4893 } 4894 4895 static bool signed_add_overflows(s64 a, s64 b) 4896 { 4897 /* Do the add in u64, where overflow is well-defined */ 4898 s64 res = (s64)((u64)a + (u64)b); 4899 4900 if (b < 0) 4901 return res > a; 4902 return res < a; 4903 } 4904 4905 static bool signed_add32_overflows(s64 a, s64 b) 4906 { 4907 /* Do the add in u32, where overflow is well-defined */ 4908 s32 res = (s32)((u32)a + (u32)b); 4909 4910 if (b < 0) 4911 return res > a; 4912 return res < a; 4913 } 4914 4915 static bool signed_sub_overflows(s32 a, s32 b) 4916 { 4917 /* Do the sub in u64, where overflow is well-defined */ 4918 s64 res = (s64)((u64)a - (u64)b); 4919 4920 if (b < 0) 4921 return res < a; 4922 return res > a; 4923 } 4924 4925 static bool signed_sub32_overflows(s32 a, s32 b) 4926 { 4927 /* Do the sub in u64, where overflow is well-defined */ 4928 s32 res = (s32)((u32)a - (u32)b); 4929 4930 if (b < 0) 4931 return res < a; 4932 return res > a; 4933 } 4934 4935 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 4936 const struct bpf_reg_state *reg, 4937 enum bpf_reg_type type) 4938 { 4939 bool known = tnum_is_const(reg->var_off); 4940 s64 val = reg->var_off.value; 4941 s64 smin = reg->smin_value; 4942 4943 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 4944 verbose(env, "math between %s pointer and %lld is not allowed\n", 4945 reg_type_str[type], val); 4946 return false; 4947 } 4948 4949 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 4950 verbose(env, "%s pointer offset %d is not allowed\n", 4951 reg_type_str[type], reg->off); 4952 return false; 4953 } 4954 4955 if (smin == S64_MIN) { 4956 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 4957 reg_type_str[type]); 4958 return false; 4959 } 4960 4961 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 4962 verbose(env, "value %lld makes %s pointer be out of bounds\n", 4963 smin, reg_type_str[type]); 4964 return false; 4965 } 4966 4967 return true; 4968 } 4969 4970 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 4971 { 4972 return &env->insn_aux_data[env->insn_idx]; 4973 } 4974 4975 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 4976 u32 *ptr_limit, u8 opcode, bool off_is_neg) 4977 { 4978 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || 4979 (opcode == BPF_SUB && !off_is_neg); 4980 u32 off; 4981 4982 switch (ptr_reg->type) { 4983 case PTR_TO_STACK: 4984 /* Indirect variable offset stack access is prohibited in 4985 * unprivileged mode so it's not handled here. 4986 */ 4987 off = ptr_reg->off + ptr_reg->var_off.value; 4988 if (mask_to_left) 4989 *ptr_limit = MAX_BPF_STACK + off; 4990 else 4991 *ptr_limit = -off; 4992 return 0; 4993 case PTR_TO_MAP_VALUE: 4994 if (mask_to_left) { 4995 *ptr_limit = ptr_reg->umax_value + ptr_reg->off; 4996 } else { 4997 off = ptr_reg->smin_value + ptr_reg->off; 4998 *ptr_limit = ptr_reg->map_ptr->value_size - off; 4999 } 5000 return 0; 5001 default: 5002 return -EINVAL; 5003 } 5004 } 5005 5006 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 5007 const struct bpf_insn *insn) 5008 { 5009 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 5010 } 5011 5012 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 5013 u32 alu_state, u32 alu_limit) 5014 { 5015 /* If we arrived here from different branches with different 5016 * state or limits to sanitize, then this won't work. 5017 */ 5018 if (aux->alu_state && 5019 (aux->alu_state != alu_state || 5020 aux->alu_limit != alu_limit)) 5021 return -EACCES; 5022 5023 /* Corresponding fixup done in fixup_bpf_calls(). */ 5024 aux->alu_state = alu_state; 5025 aux->alu_limit = alu_limit; 5026 return 0; 5027 } 5028 5029 static int sanitize_val_alu(struct bpf_verifier_env *env, 5030 struct bpf_insn *insn) 5031 { 5032 struct bpf_insn_aux_data *aux = cur_aux(env); 5033 5034 if (can_skip_alu_sanitation(env, insn)) 5035 return 0; 5036 5037 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 5038 } 5039 5040 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 5041 struct bpf_insn *insn, 5042 const struct bpf_reg_state *ptr_reg, 5043 struct bpf_reg_state *dst_reg, 5044 bool off_is_neg) 5045 { 5046 struct bpf_verifier_state *vstate = env->cur_state; 5047 struct bpf_insn_aux_data *aux = cur_aux(env); 5048 bool ptr_is_dst_reg = ptr_reg == dst_reg; 5049 u8 opcode = BPF_OP(insn->code); 5050 u32 alu_state, alu_limit; 5051 struct bpf_reg_state tmp; 5052 bool ret; 5053 5054 if (can_skip_alu_sanitation(env, insn)) 5055 return 0; 5056 5057 /* We already marked aux for masking from non-speculative 5058 * paths, thus we got here in the first place. We only care 5059 * to explore bad access from here. 5060 */ 5061 if (vstate->speculative) 5062 goto do_sim; 5063 5064 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 5065 alu_state |= ptr_is_dst_reg ? 5066 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 5067 5068 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 5069 return 0; 5070 if (update_alu_sanitation_state(aux, alu_state, alu_limit)) 5071 return -EACCES; 5072 do_sim: 5073 /* Simulate and find potential out-of-bounds access under 5074 * speculative execution from truncation as a result of 5075 * masking when off was not within expected range. If off 5076 * sits in dst, then we temporarily need to move ptr there 5077 * to simulate dst (== 0) +/-= ptr. Needed, for example, 5078 * for cases where we use K-based arithmetic in one direction 5079 * and truncated reg-based in the other in order to explore 5080 * bad access. 5081 */ 5082 if (!ptr_is_dst_reg) { 5083 tmp = *dst_reg; 5084 *dst_reg = *ptr_reg; 5085 } 5086 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); 5087 if (!ptr_is_dst_reg && ret) 5088 *dst_reg = tmp; 5089 return !ret ? -EFAULT : 0; 5090 } 5091 5092 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 5093 * Caller should also handle BPF_MOV case separately. 5094 * If we return -EACCES, caller may want to try again treating pointer as a 5095 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 5096 */ 5097 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 5098 struct bpf_insn *insn, 5099 const struct bpf_reg_state *ptr_reg, 5100 const struct bpf_reg_state *off_reg) 5101 { 5102 struct bpf_verifier_state *vstate = env->cur_state; 5103 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5104 struct bpf_reg_state *regs = state->regs, *dst_reg; 5105 bool known = tnum_is_const(off_reg->var_off); 5106 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 5107 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 5108 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 5109 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 5110 u32 dst = insn->dst_reg, src = insn->src_reg; 5111 u8 opcode = BPF_OP(insn->code); 5112 int ret; 5113 5114 dst_reg = ®s[dst]; 5115 5116 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 5117 smin_val > smax_val || umin_val > umax_val) { 5118 /* Taint dst register if offset had invalid bounds derived from 5119 * e.g. dead branches. 5120 */ 5121 __mark_reg_unknown(env, dst_reg); 5122 return 0; 5123 } 5124 5125 if (BPF_CLASS(insn->code) != BPF_ALU64) { 5126 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 5127 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 5128 __mark_reg_unknown(env, dst_reg); 5129 return 0; 5130 } 5131 5132 verbose(env, 5133 "R%d 32-bit pointer arithmetic prohibited\n", 5134 dst); 5135 return -EACCES; 5136 } 5137 5138 switch (ptr_reg->type) { 5139 case PTR_TO_MAP_VALUE_OR_NULL: 5140 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 5141 dst, reg_type_str[ptr_reg->type]); 5142 return -EACCES; 5143 case CONST_PTR_TO_MAP: 5144 case PTR_TO_PACKET_END: 5145 case PTR_TO_SOCKET: 5146 case PTR_TO_SOCKET_OR_NULL: 5147 case PTR_TO_SOCK_COMMON: 5148 case PTR_TO_SOCK_COMMON_OR_NULL: 5149 case PTR_TO_TCP_SOCK: 5150 case PTR_TO_TCP_SOCK_OR_NULL: 5151 case PTR_TO_XDP_SOCK: 5152 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 5153 dst, reg_type_str[ptr_reg->type]); 5154 return -EACCES; 5155 case PTR_TO_MAP_VALUE: 5156 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { 5157 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", 5158 off_reg == dst_reg ? dst : src); 5159 return -EACCES; 5160 } 5161 /* fall-through */ 5162 default: 5163 break; 5164 } 5165 5166 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 5167 * The id may be overwritten later if we create a new variable offset. 5168 */ 5169 dst_reg->type = ptr_reg->type; 5170 dst_reg->id = ptr_reg->id; 5171 5172 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 5173 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 5174 return -EINVAL; 5175 5176 /* pointer types do not carry 32-bit bounds at the moment. */ 5177 __mark_reg32_unbounded(dst_reg); 5178 5179 switch (opcode) { 5180 case BPF_ADD: 5181 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 5182 if (ret < 0) { 5183 verbose(env, "R%d tried to add from different maps or paths\n", dst); 5184 return ret; 5185 } 5186 /* We can take a fixed offset as long as it doesn't overflow 5187 * the s32 'off' field 5188 */ 5189 if (known && (ptr_reg->off + smin_val == 5190 (s64)(s32)(ptr_reg->off + smin_val))) { 5191 /* pointer += K. Accumulate it into fixed offset */ 5192 dst_reg->smin_value = smin_ptr; 5193 dst_reg->smax_value = smax_ptr; 5194 dst_reg->umin_value = umin_ptr; 5195 dst_reg->umax_value = umax_ptr; 5196 dst_reg->var_off = ptr_reg->var_off; 5197 dst_reg->off = ptr_reg->off + smin_val; 5198 dst_reg->raw = ptr_reg->raw; 5199 break; 5200 } 5201 /* A new variable offset is created. Note that off_reg->off 5202 * == 0, since it's a scalar. 5203 * dst_reg gets the pointer type and since some positive 5204 * integer value was added to the pointer, give it a new 'id' 5205 * if it's a PTR_TO_PACKET. 5206 * this creates a new 'base' pointer, off_reg (variable) gets 5207 * added into the variable offset, and we copy the fixed offset 5208 * from ptr_reg. 5209 */ 5210 if (signed_add_overflows(smin_ptr, smin_val) || 5211 signed_add_overflows(smax_ptr, smax_val)) { 5212 dst_reg->smin_value = S64_MIN; 5213 dst_reg->smax_value = S64_MAX; 5214 } else { 5215 dst_reg->smin_value = smin_ptr + smin_val; 5216 dst_reg->smax_value = smax_ptr + smax_val; 5217 } 5218 if (umin_ptr + umin_val < umin_ptr || 5219 umax_ptr + umax_val < umax_ptr) { 5220 dst_reg->umin_value = 0; 5221 dst_reg->umax_value = U64_MAX; 5222 } else { 5223 dst_reg->umin_value = umin_ptr + umin_val; 5224 dst_reg->umax_value = umax_ptr + umax_val; 5225 } 5226 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 5227 dst_reg->off = ptr_reg->off; 5228 dst_reg->raw = ptr_reg->raw; 5229 if (reg_is_pkt_pointer(ptr_reg)) { 5230 dst_reg->id = ++env->id_gen; 5231 /* something was added to pkt_ptr, set range to zero */ 5232 dst_reg->raw = 0; 5233 } 5234 break; 5235 case BPF_SUB: 5236 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 5237 if (ret < 0) { 5238 verbose(env, "R%d tried to sub from different maps or paths\n", dst); 5239 return ret; 5240 } 5241 if (dst_reg == off_reg) { 5242 /* scalar -= pointer. Creates an unknown scalar */ 5243 verbose(env, "R%d tried to subtract pointer from scalar\n", 5244 dst); 5245 return -EACCES; 5246 } 5247 /* We don't allow subtraction from FP, because (according to 5248 * test_verifier.c test "invalid fp arithmetic", JITs might not 5249 * be able to deal with it. 5250 */ 5251 if (ptr_reg->type == PTR_TO_STACK) { 5252 verbose(env, "R%d subtraction from stack pointer prohibited\n", 5253 dst); 5254 return -EACCES; 5255 } 5256 if (known && (ptr_reg->off - smin_val == 5257 (s64)(s32)(ptr_reg->off - smin_val))) { 5258 /* pointer -= K. Subtract it from fixed offset */ 5259 dst_reg->smin_value = smin_ptr; 5260 dst_reg->smax_value = smax_ptr; 5261 dst_reg->umin_value = umin_ptr; 5262 dst_reg->umax_value = umax_ptr; 5263 dst_reg->var_off = ptr_reg->var_off; 5264 dst_reg->id = ptr_reg->id; 5265 dst_reg->off = ptr_reg->off - smin_val; 5266 dst_reg->raw = ptr_reg->raw; 5267 break; 5268 } 5269 /* A new variable offset is created. If the subtrahend is known 5270 * nonnegative, then any reg->range we had before is still good. 5271 */ 5272 if (signed_sub_overflows(smin_ptr, smax_val) || 5273 signed_sub_overflows(smax_ptr, smin_val)) { 5274 /* Overflow possible, we know nothing */ 5275 dst_reg->smin_value = S64_MIN; 5276 dst_reg->smax_value = S64_MAX; 5277 } else { 5278 dst_reg->smin_value = smin_ptr - smax_val; 5279 dst_reg->smax_value = smax_ptr - smin_val; 5280 } 5281 if (umin_ptr < umax_val) { 5282 /* Overflow possible, we know nothing */ 5283 dst_reg->umin_value = 0; 5284 dst_reg->umax_value = U64_MAX; 5285 } else { 5286 /* Cannot overflow (as long as bounds are consistent) */ 5287 dst_reg->umin_value = umin_ptr - umax_val; 5288 dst_reg->umax_value = umax_ptr - umin_val; 5289 } 5290 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 5291 dst_reg->off = ptr_reg->off; 5292 dst_reg->raw = ptr_reg->raw; 5293 if (reg_is_pkt_pointer(ptr_reg)) { 5294 dst_reg->id = ++env->id_gen; 5295 /* something was added to pkt_ptr, set range to zero */ 5296 if (smin_val < 0) 5297 dst_reg->raw = 0; 5298 } 5299 break; 5300 case BPF_AND: 5301 case BPF_OR: 5302 case BPF_XOR: 5303 /* bitwise ops on pointers are troublesome, prohibit. */ 5304 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 5305 dst, bpf_alu_string[opcode >> 4]); 5306 return -EACCES; 5307 default: 5308 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 5309 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 5310 dst, bpf_alu_string[opcode >> 4]); 5311 return -EACCES; 5312 } 5313 5314 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 5315 return -EINVAL; 5316 5317 __update_reg_bounds(dst_reg); 5318 __reg_deduce_bounds(dst_reg); 5319 __reg_bound_offset(dst_reg); 5320 5321 /* For unprivileged we require that resulting offset must be in bounds 5322 * in order to be able to sanitize access later on. 5323 */ 5324 if (!env->bypass_spec_v1) { 5325 if (dst_reg->type == PTR_TO_MAP_VALUE && 5326 check_map_access(env, dst, dst_reg->off, 1, false)) { 5327 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 5328 "prohibited for !root\n", dst); 5329 return -EACCES; 5330 } else if (dst_reg->type == PTR_TO_STACK && 5331 check_stack_access(env, dst_reg, dst_reg->off + 5332 dst_reg->var_off.value, 1)) { 5333 verbose(env, "R%d stack pointer arithmetic goes out of range, " 5334 "prohibited for !root\n", dst); 5335 return -EACCES; 5336 } 5337 } 5338 5339 return 0; 5340 } 5341 5342 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 5343 struct bpf_reg_state *src_reg) 5344 { 5345 s32 smin_val = src_reg->s32_min_value; 5346 s32 smax_val = src_reg->s32_max_value; 5347 u32 umin_val = src_reg->u32_min_value; 5348 u32 umax_val = src_reg->u32_max_value; 5349 5350 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 5351 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 5352 dst_reg->s32_min_value = S32_MIN; 5353 dst_reg->s32_max_value = S32_MAX; 5354 } else { 5355 dst_reg->s32_min_value += smin_val; 5356 dst_reg->s32_max_value += smax_val; 5357 } 5358 if (dst_reg->u32_min_value + umin_val < umin_val || 5359 dst_reg->u32_max_value + umax_val < umax_val) { 5360 dst_reg->u32_min_value = 0; 5361 dst_reg->u32_max_value = U32_MAX; 5362 } else { 5363 dst_reg->u32_min_value += umin_val; 5364 dst_reg->u32_max_value += umax_val; 5365 } 5366 } 5367 5368 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 5369 struct bpf_reg_state *src_reg) 5370 { 5371 s64 smin_val = src_reg->smin_value; 5372 s64 smax_val = src_reg->smax_value; 5373 u64 umin_val = src_reg->umin_value; 5374 u64 umax_val = src_reg->umax_value; 5375 5376 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 5377 signed_add_overflows(dst_reg->smax_value, smax_val)) { 5378 dst_reg->smin_value = S64_MIN; 5379 dst_reg->smax_value = S64_MAX; 5380 } else { 5381 dst_reg->smin_value += smin_val; 5382 dst_reg->smax_value += smax_val; 5383 } 5384 if (dst_reg->umin_value + umin_val < umin_val || 5385 dst_reg->umax_value + umax_val < umax_val) { 5386 dst_reg->umin_value = 0; 5387 dst_reg->umax_value = U64_MAX; 5388 } else { 5389 dst_reg->umin_value += umin_val; 5390 dst_reg->umax_value += umax_val; 5391 } 5392 } 5393 5394 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 5395 struct bpf_reg_state *src_reg) 5396 { 5397 s32 smin_val = src_reg->s32_min_value; 5398 s32 smax_val = src_reg->s32_max_value; 5399 u32 umin_val = src_reg->u32_min_value; 5400 u32 umax_val = src_reg->u32_max_value; 5401 5402 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 5403 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 5404 /* Overflow possible, we know nothing */ 5405 dst_reg->s32_min_value = S32_MIN; 5406 dst_reg->s32_max_value = S32_MAX; 5407 } else { 5408 dst_reg->s32_min_value -= smax_val; 5409 dst_reg->s32_max_value -= smin_val; 5410 } 5411 if (dst_reg->u32_min_value < umax_val) { 5412 /* Overflow possible, we know nothing */ 5413 dst_reg->u32_min_value = 0; 5414 dst_reg->u32_max_value = U32_MAX; 5415 } else { 5416 /* Cannot overflow (as long as bounds are consistent) */ 5417 dst_reg->u32_min_value -= umax_val; 5418 dst_reg->u32_max_value -= umin_val; 5419 } 5420 } 5421 5422 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 5423 struct bpf_reg_state *src_reg) 5424 { 5425 s64 smin_val = src_reg->smin_value; 5426 s64 smax_val = src_reg->smax_value; 5427 u64 umin_val = src_reg->umin_value; 5428 u64 umax_val = src_reg->umax_value; 5429 5430 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 5431 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 5432 /* Overflow possible, we know nothing */ 5433 dst_reg->smin_value = S64_MIN; 5434 dst_reg->smax_value = S64_MAX; 5435 } else { 5436 dst_reg->smin_value -= smax_val; 5437 dst_reg->smax_value -= smin_val; 5438 } 5439 if (dst_reg->umin_value < umax_val) { 5440 /* Overflow possible, we know nothing */ 5441 dst_reg->umin_value = 0; 5442 dst_reg->umax_value = U64_MAX; 5443 } else { 5444 /* Cannot overflow (as long as bounds are consistent) */ 5445 dst_reg->umin_value -= umax_val; 5446 dst_reg->umax_value -= umin_val; 5447 } 5448 } 5449 5450 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 5451 struct bpf_reg_state *src_reg) 5452 { 5453 s32 smin_val = src_reg->s32_min_value; 5454 u32 umin_val = src_reg->u32_min_value; 5455 u32 umax_val = src_reg->u32_max_value; 5456 5457 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 5458 /* Ain't nobody got time to multiply that sign */ 5459 __mark_reg32_unbounded(dst_reg); 5460 return; 5461 } 5462 /* Both values are positive, so we can work with unsigned and 5463 * copy the result to signed (unless it exceeds S32_MAX). 5464 */ 5465 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 5466 /* Potential overflow, we know nothing */ 5467 __mark_reg32_unbounded(dst_reg); 5468 return; 5469 } 5470 dst_reg->u32_min_value *= umin_val; 5471 dst_reg->u32_max_value *= umax_val; 5472 if (dst_reg->u32_max_value > S32_MAX) { 5473 /* Overflow possible, we know nothing */ 5474 dst_reg->s32_min_value = S32_MIN; 5475 dst_reg->s32_max_value = S32_MAX; 5476 } else { 5477 dst_reg->s32_min_value = dst_reg->u32_min_value; 5478 dst_reg->s32_max_value = dst_reg->u32_max_value; 5479 } 5480 } 5481 5482 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 5483 struct bpf_reg_state *src_reg) 5484 { 5485 s64 smin_val = src_reg->smin_value; 5486 u64 umin_val = src_reg->umin_value; 5487 u64 umax_val = src_reg->umax_value; 5488 5489 if (smin_val < 0 || dst_reg->smin_value < 0) { 5490 /* Ain't nobody got time to multiply that sign */ 5491 __mark_reg64_unbounded(dst_reg); 5492 return; 5493 } 5494 /* Both values are positive, so we can work with unsigned and 5495 * copy the result to signed (unless it exceeds S64_MAX). 5496 */ 5497 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 5498 /* Potential overflow, we know nothing */ 5499 __mark_reg64_unbounded(dst_reg); 5500 return; 5501 } 5502 dst_reg->umin_value *= umin_val; 5503 dst_reg->umax_value *= umax_val; 5504 if (dst_reg->umax_value > S64_MAX) { 5505 /* Overflow possible, we know nothing */ 5506 dst_reg->smin_value = S64_MIN; 5507 dst_reg->smax_value = S64_MAX; 5508 } else { 5509 dst_reg->smin_value = dst_reg->umin_value; 5510 dst_reg->smax_value = dst_reg->umax_value; 5511 } 5512 } 5513 5514 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 5515 struct bpf_reg_state *src_reg) 5516 { 5517 bool src_known = tnum_subreg_is_const(src_reg->var_off); 5518 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 5519 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 5520 s32 smin_val = src_reg->s32_min_value; 5521 u32 umax_val = src_reg->u32_max_value; 5522 5523 /* Assuming scalar64_min_max_and will be called so its safe 5524 * to skip updating register for known 32-bit case. 5525 */ 5526 if (src_known && dst_known) 5527 return; 5528 5529 /* We get our minimum from the var_off, since that's inherently 5530 * bitwise. Our maximum is the minimum of the operands' maxima. 5531 */ 5532 dst_reg->u32_min_value = var32_off.value; 5533 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 5534 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 5535 /* Lose signed bounds when ANDing negative numbers, 5536 * ain't nobody got time for that. 5537 */ 5538 dst_reg->s32_min_value = S32_MIN; 5539 dst_reg->s32_max_value = S32_MAX; 5540 } else { 5541 /* ANDing two positives gives a positive, so safe to 5542 * cast result into s64. 5543 */ 5544 dst_reg->s32_min_value = dst_reg->u32_min_value; 5545 dst_reg->s32_max_value = dst_reg->u32_max_value; 5546 } 5547 5548 } 5549 5550 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 5551 struct bpf_reg_state *src_reg) 5552 { 5553 bool src_known = tnum_is_const(src_reg->var_off); 5554 bool dst_known = tnum_is_const(dst_reg->var_off); 5555 s64 smin_val = src_reg->smin_value; 5556 u64 umax_val = src_reg->umax_value; 5557 5558 if (src_known && dst_known) { 5559 __mark_reg_known(dst_reg, dst_reg->var_off.value & 5560 src_reg->var_off.value); 5561 return; 5562 } 5563 5564 /* We get our minimum from the var_off, since that's inherently 5565 * bitwise. Our maximum is the minimum of the operands' maxima. 5566 */ 5567 dst_reg->umin_value = dst_reg->var_off.value; 5568 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 5569 if (dst_reg->smin_value < 0 || smin_val < 0) { 5570 /* Lose signed bounds when ANDing negative numbers, 5571 * ain't nobody got time for that. 5572 */ 5573 dst_reg->smin_value = S64_MIN; 5574 dst_reg->smax_value = S64_MAX; 5575 } else { 5576 /* ANDing two positives gives a positive, so safe to 5577 * cast result into s64. 5578 */ 5579 dst_reg->smin_value = dst_reg->umin_value; 5580 dst_reg->smax_value = dst_reg->umax_value; 5581 } 5582 /* We may learn something more from the var_off */ 5583 __update_reg_bounds(dst_reg); 5584 } 5585 5586 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 5587 struct bpf_reg_state *src_reg) 5588 { 5589 bool src_known = tnum_subreg_is_const(src_reg->var_off); 5590 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 5591 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 5592 s32 smin_val = src_reg->smin_value; 5593 u32 umin_val = src_reg->umin_value; 5594 5595 /* Assuming scalar64_min_max_or will be called so it is safe 5596 * to skip updating register for known case. 5597 */ 5598 if (src_known && dst_known) 5599 return; 5600 5601 /* We get our maximum from the var_off, and our minimum is the 5602 * maximum of the operands' minima 5603 */ 5604 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 5605 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 5606 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 5607 /* Lose signed bounds when ORing negative numbers, 5608 * ain't nobody got time for that. 5609 */ 5610 dst_reg->s32_min_value = S32_MIN; 5611 dst_reg->s32_max_value = S32_MAX; 5612 } else { 5613 /* ORing two positives gives a positive, so safe to 5614 * cast result into s64. 5615 */ 5616 dst_reg->s32_min_value = dst_reg->umin_value; 5617 dst_reg->s32_max_value = dst_reg->umax_value; 5618 } 5619 } 5620 5621 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 5622 struct bpf_reg_state *src_reg) 5623 { 5624 bool src_known = tnum_is_const(src_reg->var_off); 5625 bool dst_known = tnum_is_const(dst_reg->var_off); 5626 s64 smin_val = src_reg->smin_value; 5627 u64 umin_val = src_reg->umin_value; 5628 5629 if (src_known && dst_known) { 5630 __mark_reg_known(dst_reg, dst_reg->var_off.value | 5631 src_reg->var_off.value); 5632 return; 5633 } 5634 5635 /* We get our maximum from the var_off, and our minimum is the 5636 * maximum of the operands' minima 5637 */ 5638 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 5639 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 5640 if (dst_reg->smin_value < 0 || smin_val < 0) { 5641 /* Lose signed bounds when ORing negative numbers, 5642 * ain't nobody got time for that. 5643 */ 5644 dst_reg->smin_value = S64_MIN; 5645 dst_reg->smax_value = S64_MAX; 5646 } else { 5647 /* ORing two positives gives a positive, so safe to 5648 * cast result into s64. 5649 */ 5650 dst_reg->smin_value = dst_reg->umin_value; 5651 dst_reg->smax_value = dst_reg->umax_value; 5652 } 5653 /* We may learn something more from the var_off */ 5654 __update_reg_bounds(dst_reg); 5655 } 5656 5657 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 5658 u64 umin_val, u64 umax_val) 5659 { 5660 /* We lose all sign bit information (except what we can pick 5661 * up from var_off) 5662 */ 5663 dst_reg->s32_min_value = S32_MIN; 5664 dst_reg->s32_max_value = S32_MAX; 5665 /* If we might shift our top bit out, then we know nothing */ 5666 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 5667 dst_reg->u32_min_value = 0; 5668 dst_reg->u32_max_value = U32_MAX; 5669 } else { 5670 dst_reg->u32_min_value <<= umin_val; 5671 dst_reg->u32_max_value <<= umax_val; 5672 } 5673 } 5674 5675 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 5676 struct bpf_reg_state *src_reg) 5677 { 5678 u32 umax_val = src_reg->u32_max_value; 5679 u32 umin_val = src_reg->u32_min_value; 5680 /* u32 alu operation will zext upper bits */ 5681 struct tnum subreg = tnum_subreg(dst_reg->var_off); 5682 5683 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 5684 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 5685 /* Not required but being careful mark reg64 bounds as unknown so 5686 * that we are forced to pick them up from tnum and zext later and 5687 * if some path skips this step we are still safe. 5688 */ 5689 __mark_reg64_unbounded(dst_reg); 5690 __update_reg32_bounds(dst_reg); 5691 } 5692 5693 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 5694 u64 umin_val, u64 umax_val) 5695 { 5696 /* Special case <<32 because it is a common compiler pattern to sign 5697 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 5698 * positive we know this shift will also be positive so we can track 5699 * bounds correctly. Otherwise we lose all sign bit information except 5700 * what we can pick up from var_off. Perhaps we can generalize this 5701 * later to shifts of any length. 5702 */ 5703 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 5704 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 5705 else 5706 dst_reg->smax_value = S64_MAX; 5707 5708 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 5709 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 5710 else 5711 dst_reg->smin_value = S64_MIN; 5712 5713 /* If we might shift our top bit out, then we know nothing */ 5714 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 5715 dst_reg->umin_value = 0; 5716 dst_reg->umax_value = U64_MAX; 5717 } else { 5718 dst_reg->umin_value <<= umin_val; 5719 dst_reg->umax_value <<= umax_val; 5720 } 5721 } 5722 5723 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 5724 struct bpf_reg_state *src_reg) 5725 { 5726 u64 umax_val = src_reg->umax_value; 5727 u64 umin_val = src_reg->umin_value; 5728 5729 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 5730 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 5731 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 5732 5733 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 5734 /* We may learn something more from the var_off */ 5735 __update_reg_bounds(dst_reg); 5736 } 5737 5738 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 5739 struct bpf_reg_state *src_reg) 5740 { 5741 struct tnum subreg = tnum_subreg(dst_reg->var_off); 5742 u32 umax_val = src_reg->u32_max_value; 5743 u32 umin_val = src_reg->u32_min_value; 5744 5745 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 5746 * be negative, then either: 5747 * 1) src_reg might be zero, so the sign bit of the result is 5748 * unknown, so we lose our signed bounds 5749 * 2) it's known negative, thus the unsigned bounds capture the 5750 * signed bounds 5751 * 3) the signed bounds cross zero, so they tell us nothing 5752 * about the result 5753 * If the value in dst_reg is known nonnegative, then again the 5754 * unsigned bounts capture the signed bounds. 5755 * Thus, in all cases it suffices to blow away our signed bounds 5756 * and rely on inferring new ones from the unsigned bounds and 5757 * var_off of the result. 5758 */ 5759 dst_reg->s32_min_value = S32_MIN; 5760 dst_reg->s32_max_value = S32_MAX; 5761 5762 dst_reg->var_off = tnum_rshift(subreg, umin_val); 5763 dst_reg->u32_min_value >>= umax_val; 5764 dst_reg->u32_max_value >>= umin_val; 5765 5766 __mark_reg64_unbounded(dst_reg); 5767 __update_reg32_bounds(dst_reg); 5768 } 5769 5770 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 5771 struct bpf_reg_state *src_reg) 5772 { 5773 u64 umax_val = src_reg->umax_value; 5774 u64 umin_val = src_reg->umin_value; 5775 5776 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 5777 * be negative, then either: 5778 * 1) src_reg might be zero, so the sign bit of the result is 5779 * unknown, so we lose our signed bounds 5780 * 2) it's known negative, thus the unsigned bounds capture the 5781 * signed bounds 5782 * 3) the signed bounds cross zero, so they tell us nothing 5783 * about the result 5784 * If the value in dst_reg is known nonnegative, then again the 5785 * unsigned bounts capture the signed bounds. 5786 * Thus, in all cases it suffices to blow away our signed bounds 5787 * and rely on inferring new ones from the unsigned bounds and 5788 * var_off of the result. 5789 */ 5790 dst_reg->smin_value = S64_MIN; 5791 dst_reg->smax_value = S64_MAX; 5792 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 5793 dst_reg->umin_value >>= umax_val; 5794 dst_reg->umax_value >>= umin_val; 5795 5796 /* Its not easy to operate on alu32 bounds here because it depends 5797 * on bits being shifted in. Take easy way out and mark unbounded 5798 * so we can recalculate later from tnum. 5799 */ 5800 __mark_reg32_unbounded(dst_reg); 5801 __update_reg_bounds(dst_reg); 5802 } 5803 5804 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 5805 struct bpf_reg_state *src_reg) 5806 { 5807 u64 umin_val = src_reg->u32_min_value; 5808 5809 /* Upon reaching here, src_known is true and 5810 * umax_val is equal to umin_val. 5811 */ 5812 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 5813 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 5814 5815 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 5816 5817 /* blow away the dst_reg umin_value/umax_value and rely on 5818 * dst_reg var_off to refine the result. 5819 */ 5820 dst_reg->u32_min_value = 0; 5821 dst_reg->u32_max_value = U32_MAX; 5822 5823 __mark_reg64_unbounded(dst_reg); 5824 __update_reg32_bounds(dst_reg); 5825 } 5826 5827 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 5828 struct bpf_reg_state *src_reg) 5829 { 5830 u64 umin_val = src_reg->umin_value; 5831 5832 /* Upon reaching here, src_known is true and umax_val is equal 5833 * to umin_val. 5834 */ 5835 dst_reg->smin_value >>= umin_val; 5836 dst_reg->smax_value >>= umin_val; 5837 5838 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 5839 5840 /* blow away the dst_reg umin_value/umax_value and rely on 5841 * dst_reg var_off to refine the result. 5842 */ 5843 dst_reg->umin_value = 0; 5844 dst_reg->umax_value = U64_MAX; 5845 5846 /* Its not easy to operate on alu32 bounds here because it depends 5847 * on bits being shifted in from upper 32-bits. Take easy way out 5848 * and mark unbounded so we can recalculate later from tnum. 5849 */ 5850 __mark_reg32_unbounded(dst_reg); 5851 __update_reg_bounds(dst_reg); 5852 } 5853 5854 /* WARNING: This function does calculations on 64-bit values, but the actual 5855 * execution may occur on 32-bit values. Therefore, things like bitshifts 5856 * need extra checks in the 32-bit case. 5857 */ 5858 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 5859 struct bpf_insn *insn, 5860 struct bpf_reg_state *dst_reg, 5861 struct bpf_reg_state src_reg) 5862 { 5863 struct bpf_reg_state *regs = cur_regs(env); 5864 u8 opcode = BPF_OP(insn->code); 5865 bool src_known; 5866 s64 smin_val, smax_val; 5867 u64 umin_val, umax_val; 5868 s32 s32_min_val, s32_max_val; 5869 u32 u32_min_val, u32_max_val; 5870 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 5871 u32 dst = insn->dst_reg; 5872 int ret; 5873 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 5874 5875 smin_val = src_reg.smin_value; 5876 smax_val = src_reg.smax_value; 5877 umin_val = src_reg.umin_value; 5878 umax_val = src_reg.umax_value; 5879 5880 s32_min_val = src_reg.s32_min_value; 5881 s32_max_val = src_reg.s32_max_value; 5882 u32_min_val = src_reg.u32_min_value; 5883 u32_max_val = src_reg.u32_max_value; 5884 5885 if (alu32) { 5886 src_known = tnum_subreg_is_const(src_reg.var_off); 5887 if ((src_known && 5888 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 5889 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 5890 /* Taint dst register if offset had invalid bounds 5891 * derived from e.g. dead branches. 5892 */ 5893 __mark_reg_unknown(env, dst_reg); 5894 return 0; 5895 } 5896 } else { 5897 src_known = tnum_is_const(src_reg.var_off); 5898 if ((src_known && 5899 (smin_val != smax_val || umin_val != umax_val)) || 5900 smin_val > smax_val || umin_val > umax_val) { 5901 /* Taint dst register if offset had invalid bounds 5902 * derived from e.g. dead branches. 5903 */ 5904 __mark_reg_unknown(env, dst_reg); 5905 return 0; 5906 } 5907 } 5908 5909 if (!src_known && 5910 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 5911 __mark_reg_unknown(env, dst_reg); 5912 return 0; 5913 } 5914 5915 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 5916 * There are two classes of instructions: The first class we track both 5917 * alu32 and alu64 sign/unsigned bounds independently this provides the 5918 * greatest amount of precision when alu operations are mixed with jmp32 5919 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_ADD, 5920 * and BPF_OR. This is possible because these ops have fairly easy to 5921 * understand and calculate behavior in both 32-bit and 64-bit alu ops. 5922 * See alu32 verifier tests for examples. The second class of 5923 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy 5924 * with regards to tracking sign/unsigned bounds because the bits may 5925 * cross subreg boundaries in the alu64 case. When this happens we mark 5926 * the reg unbounded in the subreg bound space and use the resulting 5927 * tnum to calculate an approximation of the sign/unsigned bounds. 5928 */ 5929 switch (opcode) { 5930 case BPF_ADD: 5931 ret = sanitize_val_alu(env, insn); 5932 if (ret < 0) { 5933 verbose(env, "R%d tried to add from different pointers or scalars\n", dst); 5934 return ret; 5935 } 5936 scalar32_min_max_add(dst_reg, &src_reg); 5937 scalar_min_max_add(dst_reg, &src_reg); 5938 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 5939 break; 5940 case BPF_SUB: 5941 ret = sanitize_val_alu(env, insn); 5942 if (ret < 0) { 5943 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); 5944 return ret; 5945 } 5946 scalar32_min_max_sub(dst_reg, &src_reg); 5947 scalar_min_max_sub(dst_reg, &src_reg); 5948 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 5949 break; 5950 case BPF_MUL: 5951 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 5952 scalar32_min_max_mul(dst_reg, &src_reg); 5953 scalar_min_max_mul(dst_reg, &src_reg); 5954 break; 5955 case BPF_AND: 5956 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 5957 scalar32_min_max_and(dst_reg, &src_reg); 5958 scalar_min_max_and(dst_reg, &src_reg); 5959 break; 5960 case BPF_OR: 5961 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 5962 scalar32_min_max_or(dst_reg, &src_reg); 5963 scalar_min_max_or(dst_reg, &src_reg); 5964 break; 5965 case BPF_LSH: 5966 if (umax_val >= insn_bitness) { 5967 /* Shifts greater than 31 or 63 are undefined. 5968 * This includes shifts by a negative number. 5969 */ 5970 mark_reg_unknown(env, regs, insn->dst_reg); 5971 break; 5972 } 5973 if (alu32) 5974 scalar32_min_max_lsh(dst_reg, &src_reg); 5975 else 5976 scalar_min_max_lsh(dst_reg, &src_reg); 5977 break; 5978 case BPF_RSH: 5979 if (umax_val >= insn_bitness) { 5980 /* Shifts greater than 31 or 63 are undefined. 5981 * This includes shifts by a negative number. 5982 */ 5983 mark_reg_unknown(env, regs, insn->dst_reg); 5984 break; 5985 } 5986 if (alu32) 5987 scalar32_min_max_rsh(dst_reg, &src_reg); 5988 else 5989 scalar_min_max_rsh(dst_reg, &src_reg); 5990 break; 5991 case BPF_ARSH: 5992 if (umax_val >= insn_bitness) { 5993 /* Shifts greater than 31 or 63 are undefined. 5994 * This includes shifts by a negative number. 5995 */ 5996 mark_reg_unknown(env, regs, insn->dst_reg); 5997 break; 5998 } 5999 if (alu32) 6000 scalar32_min_max_arsh(dst_reg, &src_reg); 6001 else 6002 scalar_min_max_arsh(dst_reg, &src_reg); 6003 break; 6004 default: 6005 mark_reg_unknown(env, regs, insn->dst_reg); 6006 break; 6007 } 6008 6009 /* ALU32 ops are zero extended into 64bit register */ 6010 if (alu32) 6011 zext_32_to_64(dst_reg); 6012 6013 __update_reg_bounds(dst_reg); 6014 __reg_deduce_bounds(dst_reg); 6015 __reg_bound_offset(dst_reg); 6016 return 0; 6017 } 6018 6019 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 6020 * and var_off. 6021 */ 6022 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 6023 struct bpf_insn *insn) 6024 { 6025 struct bpf_verifier_state *vstate = env->cur_state; 6026 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 6027 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 6028 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 6029 u8 opcode = BPF_OP(insn->code); 6030 int err; 6031 6032 dst_reg = ®s[insn->dst_reg]; 6033 src_reg = NULL; 6034 if (dst_reg->type != SCALAR_VALUE) 6035 ptr_reg = dst_reg; 6036 if (BPF_SRC(insn->code) == BPF_X) { 6037 src_reg = ®s[insn->src_reg]; 6038 if (src_reg->type != SCALAR_VALUE) { 6039 if (dst_reg->type != SCALAR_VALUE) { 6040 /* Combining two pointers by any ALU op yields 6041 * an arbitrary scalar. Disallow all math except 6042 * pointer subtraction 6043 */ 6044 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 6045 mark_reg_unknown(env, regs, insn->dst_reg); 6046 return 0; 6047 } 6048 verbose(env, "R%d pointer %s pointer prohibited\n", 6049 insn->dst_reg, 6050 bpf_alu_string[opcode >> 4]); 6051 return -EACCES; 6052 } else { 6053 /* scalar += pointer 6054 * This is legal, but we have to reverse our 6055 * src/dest handling in computing the range 6056 */ 6057 err = mark_chain_precision(env, insn->dst_reg); 6058 if (err) 6059 return err; 6060 return adjust_ptr_min_max_vals(env, insn, 6061 src_reg, dst_reg); 6062 } 6063 } else if (ptr_reg) { 6064 /* pointer += scalar */ 6065 err = mark_chain_precision(env, insn->src_reg); 6066 if (err) 6067 return err; 6068 return adjust_ptr_min_max_vals(env, insn, 6069 dst_reg, src_reg); 6070 } 6071 } else { 6072 /* Pretend the src is a reg with a known value, since we only 6073 * need to be able to read from this state. 6074 */ 6075 off_reg.type = SCALAR_VALUE; 6076 __mark_reg_known(&off_reg, insn->imm); 6077 src_reg = &off_reg; 6078 if (ptr_reg) /* pointer += K */ 6079 return adjust_ptr_min_max_vals(env, insn, 6080 ptr_reg, src_reg); 6081 } 6082 6083 /* Got here implies adding two SCALAR_VALUEs */ 6084 if (WARN_ON_ONCE(ptr_reg)) { 6085 print_verifier_state(env, state); 6086 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 6087 return -EINVAL; 6088 } 6089 if (WARN_ON(!src_reg)) { 6090 print_verifier_state(env, state); 6091 verbose(env, "verifier internal error: no src_reg\n"); 6092 return -EINVAL; 6093 } 6094 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 6095 } 6096 6097 /* check validity of 32-bit and 64-bit arithmetic operations */ 6098 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 6099 { 6100 struct bpf_reg_state *regs = cur_regs(env); 6101 u8 opcode = BPF_OP(insn->code); 6102 int err; 6103 6104 if (opcode == BPF_END || opcode == BPF_NEG) { 6105 if (opcode == BPF_NEG) { 6106 if (BPF_SRC(insn->code) != 0 || 6107 insn->src_reg != BPF_REG_0 || 6108 insn->off != 0 || insn->imm != 0) { 6109 verbose(env, "BPF_NEG uses reserved fields\n"); 6110 return -EINVAL; 6111 } 6112 } else { 6113 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 6114 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 6115 BPF_CLASS(insn->code) == BPF_ALU64) { 6116 verbose(env, "BPF_END uses reserved fields\n"); 6117 return -EINVAL; 6118 } 6119 } 6120 6121 /* check src operand */ 6122 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 6123 if (err) 6124 return err; 6125 6126 if (is_pointer_value(env, insn->dst_reg)) { 6127 verbose(env, "R%d pointer arithmetic prohibited\n", 6128 insn->dst_reg); 6129 return -EACCES; 6130 } 6131 6132 /* check dest operand */ 6133 err = check_reg_arg(env, insn->dst_reg, DST_OP); 6134 if (err) 6135 return err; 6136 6137 } else if (opcode == BPF_MOV) { 6138 6139 if (BPF_SRC(insn->code) == BPF_X) { 6140 if (insn->imm != 0 || insn->off != 0) { 6141 verbose(env, "BPF_MOV uses reserved fields\n"); 6142 return -EINVAL; 6143 } 6144 6145 /* check src operand */ 6146 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6147 if (err) 6148 return err; 6149 } else { 6150 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 6151 verbose(env, "BPF_MOV uses reserved fields\n"); 6152 return -EINVAL; 6153 } 6154 } 6155 6156 /* check dest operand, mark as required later */ 6157 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 6158 if (err) 6159 return err; 6160 6161 if (BPF_SRC(insn->code) == BPF_X) { 6162 struct bpf_reg_state *src_reg = regs + insn->src_reg; 6163 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 6164 6165 if (BPF_CLASS(insn->code) == BPF_ALU64) { 6166 /* case: R1 = R2 6167 * copy register state to dest reg 6168 */ 6169 *dst_reg = *src_reg; 6170 dst_reg->live |= REG_LIVE_WRITTEN; 6171 dst_reg->subreg_def = DEF_NOT_SUBREG; 6172 } else { 6173 /* R1 = (u32) R2 */ 6174 if (is_pointer_value(env, insn->src_reg)) { 6175 verbose(env, 6176 "R%d partial copy of pointer\n", 6177 insn->src_reg); 6178 return -EACCES; 6179 } else if (src_reg->type == SCALAR_VALUE) { 6180 *dst_reg = *src_reg; 6181 dst_reg->live |= REG_LIVE_WRITTEN; 6182 dst_reg->subreg_def = env->insn_idx + 1; 6183 } else { 6184 mark_reg_unknown(env, regs, 6185 insn->dst_reg); 6186 } 6187 zext_32_to_64(dst_reg); 6188 } 6189 } else { 6190 /* case: R = imm 6191 * remember the value we stored into this reg 6192 */ 6193 /* clear any state __mark_reg_known doesn't set */ 6194 mark_reg_unknown(env, regs, insn->dst_reg); 6195 regs[insn->dst_reg].type = SCALAR_VALUE; 6196 if (BPF_CLASS(insn->code) == BPF_ALU64) { 6197 __mark_reg_known(regs + insn->dst_reg, 6198 insn->imm); 6199 } else { 6200 __mark_reg_known(regs + insn->dst_reg, 6201 (u32)insn->imm); 6202 } 6203 } 6204 6205 } else if (opcode > BPF_END) { 6206 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 6207 return -EINVAL; 6208 6209 } else { /* all other ALU ops: and, sub, xor, add, ... */ 6210 6211 if (BPF_SRC(insn->code) == BPF_X) { 6212 if (insn->imm != 0 || insn->off != 0) { 6213 verbose(env, "BPF_ALU uses reserved fields\n"); 6214 return -EINVAL; 6215 } 6216 /* check src1 operand */ 6217 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6218 if (err) 6219 return err; 6220 } else { 6221 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 6222 verbose(env, "BPF_ALU uses reserved fields\n"); 6223 return -EINVAL; 6224 } 6225 } 6226 6227 /* check src2 operand */ 6228 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 6229 if (err) 6230 return err; 6231 6232 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 6233 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 6234 verbose(env, "div by zero\n"); 6235 return -EINVAL; 6236 } 6237 6238 if ((opcode == BPF_LSH || opcode == BPF_RSH || 6239 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 6240 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 6241 6242 if (insn->imm < 0 || insn->imm >= size) { 6243 verbose(env, "invalid shift %d\n", insn->imm); 6244 return -EINVAL; 6245 } 6246 } 6247 6248 /* check dest operand */ 6249 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 6250 if (err) 6251 return err; 6252 6253 return adjust_reg_min_max_vals(env, insn); 6254 } 6255 6256 return 0; 6257 } 6258 6259 static void __find_good_pkt_pointers(struct bpf_func_state *state, 6260 struct bpf_reg_state *dst_reg, 6261 enum bpf_reg_type type, u16 new_range) 6262 { 6263 struct bpf_reg_state *reg; 6264 int i; 6265 6266 for (i = 0; i < MAX_BPF_REG; i++) { 6267 reg = &state->regs[i]; 6268 if (reg->type == type && reg->id == dst_reg->id) 6269 /* keep the maximum range already checked */ 6270 reg->range = max(reg->range, new_range); 6271 } 6272 6273 bpf_for_each_spilled_reg(i, state, reg) { 6274 if (!reg) 6275 continue; 6276 if (reg->type == type && reg->id == dst_reg->id) 6277 reg->range = max(reg->range, new_range); 6278 } 6279 } 6280 6281 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 6282 struct bpf_reg_state *dst_reg, 6283 enum bpf_reg_type type, 6284 bool range_right_open) 6285 { 6286 u16 new_range; 6287 int i; 6288 6289 if (dst_reg->off < 0 || 6290 (dst_reg->off == 0 && range_right_open)) 6291 /* This doesn't give us any range */ 6292 return; 6293 6294 if (dst_reg->umax_value > MAX_PACKET_OFF || 6295 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 6296 /* Risk of overflow. For instance, ptr + (1<<63) may be less 6297 * than pkt_end, but that's because it's also less than pkt. 6298 */ 6299 return; 6300 6301 new_range = dst_reg->off; 6302 if (range_right_open) 6303 new_range--; 6304 6305 /* Examples for register markings: 6306 * 6307 * pkt_data in dst register: 6308 * 6309 * r2 = r3; 6310 * r2 += 8; 6311 * if (r2 > pkt_end) goto <handle exception> 6312 * <access okay> 6313 * 6314 * r2 = r3; 6315 * r2 += 8; 6316 * if (r2 < pkt_end) goto <access okay> 6317 * <handle exception> 6318 * 6319 * Where: 6320 * r2 == dst_reg, pkt_end == src_reg 6321 * r2=pkt(id=n,off=8,r=0) 6322 * r3=pkt(id=n,off=0,r=0) 6323 * 6324 * pkt_data in src register: 6325 * 6326 * r2 = r3; 6327 * r2 += 8; 6328 * if (pkt_end >= r2) goto <access okay> 6329 * <handle exception> 6330 * 6331 * r2 = r3; 6332 * r2 += 8; 6333 * if (pkt_end <= r2) goto <handle exception> 6334 * <access okay> 6335 * 6336 * Where: 6337 * pkt_end == dst_reg, r2 == src_reg 6338 * r2=pkt(id=n,off=8,r=0) 6339 * r3=pkt(id=n,off=0,r=0) 6340 * 6341 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 6342 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 6343 * and [r3, r3 + 8-1) respectively is safe to access depending on 6344 * the check. 6345 */ 6346 6347 /* If our ids match, then we must have the same max_value. And we 6348 * don't care about the other reg's fixed offset, since if it's too big 6349 * the range won't allow anything. 6350 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 6351 */ 6352 for (i = 0; i <= vstate->curframe; i++) 6353 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, 6354 new_range); 6355 } 6356 6357 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 6358 { 6359 struct tnum subreg = tnum_subreg(reg->var_off); 6360 s32 sval = (s32)val; 6361 6362 switch (opcode) { 6363 case BPF_JEQ: 6364 if (tnum_is_const(subreg)) 6365 return !!tnum_equals_const(subreg, val); 6366 break; 6367 case BPF_JNE: 6368 if (tnum_is_const(subreg)) 6369 return !tnum_equals_const(subreg, val); 6370 break; 6371 case BPF_JSET: 6372 if ((~subreg.mask & subreg.value) & val) 6373 return 1; 6374 if (!((subreg.mask | subreg.value) & val)) 6375 return 0; 6376 break; 6377 case BPF_JGT: 6378 if (reg->u32_min_value > val) 6379 return 1; 6380 else if (reg->u32_max_value <= val) 6381 return 0; 6382 break; 6383 case BPF_JSGT: 6384 if (reg->s32_min_value > sval) 6385 return 1; 6386 else if (reg->s32_max_value < sval) 6387 return 0; 6388 break; 6389 case BPF_JLT: 6390 if (reg->u32_max_value < val) 6391 return 1; 6392 else if (reg->u32_min_value >= val) 6393 return 0; 6394 break; 6395 case BPF_JSLT: 6396 if (reg->s32_max_value < sval) 6397 return 1; 6398 else if (reg->s32_min_value >= sval) 6399 return 0; 6400 break; 6401 case BPF_JGE: 6402 if (reg->u32_min_value >= val) 6403 return 1; 6404 else if (reg->u32_max_value < val) 6405 return 0; 6406 break; 6407 case BPF_JSGE: 6408 if (reg->s32_min_value >= sval) 6409 return 1; 6410 else if (reg->s32_max_value < sval) 6411 return 0; 6412 break; 6413 case BPF_JLE: 6414 if (reg->u32_max_value <= val) 6415 return 1; 6416 else if (reg->u32_min_value > val) 6417 return 0; 6418 break; 6419 case BPF_JSLE: 6420 if (reg->s32_max_value <= sval) 6421 return 1; 6422 else if (reg->s32_min_value > sval) 6423 return 0; 6424 break; 6425 } 6426 6427 return -1; 6428 } 6429 6430 6431 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 6432 { 6433 s64 sval = (s64)val; 6434 6435 switch (opcode) { 6436 case BPF_JEQ: 6437 if (tnum_is_const(reg->var_off)) 6438 return !!tnum_equals_const(reg->var_off, val); 6439 break; 6440 case BPF_JNE: 6441 if (tnum_is_const(reg->var_off)) 6442 return !tnum_equals_const(reg->var_off, val); 6443 break; 6444 case BPF_JSET: 6445 if ((~reg->var_off.mask & reg->var_off.value) & val) 6446 return 1; 6447 if (!((reg->var_off.mask | reg->var_off.value) & val)) 6448 return 0; 6449 break; 6450 case BPF_JGT: 6451 if (reg->umin_value > val) 6452 return 1; 6453 else if (reg->umax_value <= val) 6454 return 0; 6455 break; 6456 case BPF_JSGT: 6457 if (reg->smin_value > sval) 6458 return 1; 6459 else if (reg->smax_value < sval) 6460 return 0; 6461 break; 6462 case BPF_JLT: 6463 if (reg->umax_value < val) 6464 return 1; 6465 else if (reg->umin_value >= val) 6466 return 0; 6467 break; 6468 case BPF_JSLT: 6469 if (reg->smax_value < sval) 6470 return 1; 6471 else if (reg->smin_value >= sval) 6472 return 0; 6473 break; 6474 case BPF_JGE: 6475 if (reg->umin_value >= val) 6476 return 1; 6477 else if (reg->umax_value < val) 6478 return 0; 6479 break; 6480 case BPF_JSGE: 6481 if (reg->smin_value >= sval) 6482 return 1; 6483 else if (reg->smax_value < sval) 6484 return 0; 6485 break; 6486 case BPF_JLE: 6487 if (reg->umax_value <= val) 6488 return 1; 6489 else if (reg->umin_value > val) 6490 return 0; 6491 break; 6492 case BPF_JSLE: 6493 if (reg->smax_value <= sval) 6494 return 1; 6495 else if (reg->smin_value > sval) 6496 return 0; 6497 break; 6498 } 6499 6500 return -1; 6501 } 6502 6503 /* compute branch direction of the expression "if (reg opcode val) goto target;" 6504 * and return: 6505 * 1 - branch will be taken and "goto target" will be executed 6506 * 0 - branch will not be taken and fall-through to next insn 6507 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value 6508 * range [0,10] 6509 */ 6510 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 6511 bool is_jmp32) 6512 { 6513 if (__is_pointer_value(false, reg)) { 6514 if (!reg_type_not_null(reg->type)) 6515 return -1; 6516 6517 /* If pointer is valid tests against zero will fail so we can 6518 * use this to direct branch taken. 6519 */ 6520 if (val != 0) 6521 return -1; 6522 6523 switch (opcode) { 6524 case BPF_JEQ: 6525 return 0; 6526 case BPF_JNE: 6527 return 1; 6528 default: 6529 return -1; 6530 } 6531 } 6532 6533 if (is_jmp32) 6534 return is_branch32_taken(reg, val, opcode); 6535 return is_branch64_taken(reg, val, opcode); 6536 } 6537 6538 /* Adjusts the register min/max values in the case that the dst_reg is the 6539 * variable register that we are working on, and src_reg is a constant or we're 6540 * simply doing a BPF_K check. 6541 * In JEQ/JNE cases we also adjust the var_off values. 6542 */ 6543 static void reg_set_min_max(struct bpf_reg_state *true_reg, 6544 struct bpf_reg_state *false_reg, 6545 u64 val, u32 val32, 6546 u8 opcode, bool is_jmp32) 6547 { 6548 struct tnum false_32off = tnum_subreg(false_reg->var_off); 6549 struct tnum false_64off = false_reg->var_off; 6550 struct tnum true_32off = tnum_subreg(true_reg->var_off); 6551 struct tnum true_64off = true_reg->var_off; 6552 s64 sval = (s64)val; 6553 s32 sval32 = (s32)val32; 6554 6555 /* If the dst_reg is a pointer, we can't learn anything about its 6556 * variable offset from the compare (unless src_reg were a pointer into 6557 * the same object, but we don't bother with that. 6558 * Since false_reg and true_reg have the same type by construction, we 6559 * only need to check one of them for pointerness. 6560 */ 6561 if (__is_pointer_value(false, false_reg)) 6562 return; 6563 6564 switch (opcode) { 6565 case BPF_JEQ: 6566 case BPF_JNE: 6567 { 6568 struct bpf_reg_state *reg = 6569 opcode == BPF_JEQ ? true_reg : false_reg; 6570 6571 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but 6572 * if it is true we know the value for sure. Likewise for 6573 * BPF_JNE. 6574 */ 6575 if (is_jmp32) 6576 __mark_reg32_known(reg, val32); 6577 else 6578 __mark_reg_known(reg, val); 6579 break; 6580 } 6581 case BPF_JSET: 6582 if (is_jmp32) { 6583 false_32off = tnum_and(false_32off, tnum_const(~val32)); 6584 if (is_power_of_2(val32)) 6585 true_32off = tnum_or(true_32off, 6586 tnum_const(val32)); 6587 } else { 6588 false_64off = tnum_and(false_64off, tnum_const(~val)); 6589 if (is_power_of_2(val)) 6590 true_64off = tnum_or(true_64off, 6591 tnum_const(val)); 6592 } 6593 break; 6594 case BPF_JGE: 6595 case BPF_JGT: 6596 { 6597 if (is_jmp32) { 6598 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 6599 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 6600 6601 false_reg->u32_max_value = min(false_reg->u32_max_value, 6602 false_umax); 6603 true_reg->u32_min_value = max(true_reg->u32_min_value, 6604 true_umin); 6605 } else { 6606 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 6607 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 6608 6609 false_reg->umax_value = min(false_reg->umax_value, false_umax); 6610 true_reg->umin_value = max(true_reg->umin_value, true_umin); 6611 } 6612 break; 6613 } 6614 case BPF_JSGE: 6615 case BPF_JSGT: 6616 { 6617 if (is_jmp32) { 6618 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 6619 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 6620 6621 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 6622 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 6623 } else { 6624 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 6625 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 6626 6627 false_reg->smax_value = min(false_reg->smax_value, false_smax); 6628 true_reg->smin_value = max(true_reg->smin_value, true_smin); 6629 } 6630 break; 6631 } 6632 case BPF_JLE: 6633 case BPF_JLT: 6634 { 6635 if (is_jmp32) { 6636 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 6637 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 6638 6639 false_reg->u32_min_value = max(false_reg->u32_min_value, 6640 false_umin); 6641 true_reg->u32_max_value = min(true_reg->u32_max_value, 6642 true_umax); 6643 } else { 6644 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 6645 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 6646 6647 false_reg->umin_value = max(false_reg->umin_value, false_umin); 6648 true_reg->umax_value = min(true_reg->umax_value, true_umax); 6649 } 6650 break; 6651 } 6652 case BPF_JSLE: 6653 case BPF_JSLT: 6654 { 6655 if (is_jmp32) { 6656 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 6657 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 6658 6659 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 6660 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 6661 } else { 6662 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 6663 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; 6664 6665 false_reg->smin_value = max(false_reg->smin_value, false_smin); 6666 true_reg->smax_value = min(true_reg->smax_value, true_smax); 6667 } 6668 break; 6669 } 6670 default: 6671 return; 6672 } 6673 6674 if (is_jmp32) { 6675 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 6676 tnum_subreg(false_32off)); 6677 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 6678 tnum_subreg(true_32off)); 6679 __reg_combine_32_into_64(false_reg); 6680 __reg_combine_32_into_64(true_reg); 6681 } else { 6682 false_reg->var_off = false_64off; 6683 true_reg->var_off = true_64off; 6684 __reg_combine_64_into_32(false_reg); 6685 __reg_combine_64_into_32(true_reg); 6686 } 6687 } 6688 6689 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 6690 * the variable reg. 6691 */ 6692 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 6693 struct bpf_reg_state *false_reg, 6694 u64 val, u32 val32, 6695 u8 opcode, bool is_jmp32) 6696 { 6697 /* How can we transform "a <op> b" into "b <op> a"? */ 6698 static const u8 opcode_flip[16] = { 6699 /* these stay the same */ 6700 [BPF_JEQ >> 4] = BPF_JEQ, 6701 [BPF_JNE >> 4] = BPF_JNE, 6702 [BPF_JSET >> 4] = BPF_JSET, 6703 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 6704 [BPF_JGE >> 4] = BPF_JLE, 6705 [BPF_JGT >> 4] = BPF_JLT, 6706 [BPF_JLE >> 4] = BPF_JGE, 6707 [BPF_JLT >> 4] = BPF_JGT, 6708 [BPF_JSGE >> 4] = BPF_JSLE, 6709 [BPF_JSGT >> 4] = BPF_JSLT, 6710 [BPF_JSLE >> 4] = BPF_JSGE, 6711 [BPF_JSLT >> 4] = BPF_JSGT 6712 }; 6713 opcode = opcode_flip[opcode >> 4]; 6714 /* This uses zero as "not present in table"; luckily the zero opcode, 6715 * BPF_JA, can't get here. 6716 */ 6717 if (opcode) 6718 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 6719 } 6720 6721 /* Regs are known to be equal, so intersect their min/max/var_off */ 6722 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 6723 struct bpf_reg_state *dst_reg) 6724 { 6725 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 6726 dst_reg->umin_value); 6727 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 6728 dst_reg->umax_value); 6729 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 6730 dst_reg->smin_value); 6731 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 6732 dst_reg->smax_value); 6733 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 6734 dst_reg->var_off); 6735 /* We might have learned new bounds from the var_off. */ 6736 __update_reg_bounds(src_reg); 6737 __update_reg_bounds(dst_reg); 6738 /* We might have learned something about the sign bit. */ 6739 __reg_deduce_bounds(src_reg); 6740 __reg_deduce_bounds(dst_reg); 6741 /* We might have learned some bits from the bounds. */ 6742 __reg_bound_offset(src_reg); 6743 __reg_bound_offset(dst_reg); 6744 /* Intersecting with the old var_off might have improved our bounds 6745 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 6746 * then new var_off is (0; 0x7f...fc) which improves our umax. 6747 */ 6748 __update_reg_bounds(src_reg); 6749 __update_reg_bounds(dst_reg); 6750 } 6751 6752 static void reg_combine_min_max(struct bpf_reg_state *true_src, 6753 struct bpf_reg_state *true_dst, 6754 struct bpf_reg_state *false_src, 6755 struct bpf_reg_state *false_dst, 6756 u8 opcode) 6757 { 6758 switch (opcode) { 6759 case BPF_JEQ: 6760 __reg_combine_min_max(true_src, true_dst); 6761 break; 6762 case BPF_JNE: 6763 __reg_combine_min_max(false_src, false_dst); 6764 break; 6765 } 6766 } 6767 6768 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 6769 struct bpf_reg_state *reg, u32 id, 6770 bool is_null) 6771 { 6772 if (reg_type_may_be_null(reg->type) && reg->id == id) { 6773 /* Old offset (both fixed and variable parts) should 6774 * have been known-zero, because we don't allow pointer 6775 * arithmetic on pointers that might be NULL. 6776 */ 6777 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 6778 !tnum_equals_const(reg->var_off, 0) || 6779 reg->off)) { 6780 __mark_reg_known_zero(reg); 6781 reg->off = 0; 6782 } 6783 if (is_null) { 6784 reg->type = SCALAR_VALUE; 6785 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 6786 const struct bpf_map *map = reg->map_ptr; 6787 6788 if (map->inner_map_meta) { 6789 reg->type = CONST_PTR_TO_MAP; 6790 reg->map_ptr = map->inner_map_meta; 6791 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { 6792 reg->type = PTR_TO_XDP_SOCK; 6793 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || 6794 map->map_type == BPF_MAP_TYPE_SOCKHASH) { 6795 reg->type = PTR_TO_SOCKET; 6796 } else { 6797 reg->type = PTR_TO_MAP_VALUE; 6798 } 6799 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { 6800 reg->type = PTR_TO_SOCKET; 6801 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { 6802 reg->type = PTR_TO_SOCK_COMMON; 6803 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { 6804 reg->type = PTR_TO_TCP_SOCK; 6805 } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) { 6806 reg->type = PTR_TO_BTF_ID; 6807 } else if (reg->type == PTR_TO_MEM_OR_NULL) { 6808 reg->type = PTR_TO_MEM; 6809 } 6810 if (is_null) { 6811 /* We don't need id and ref_obj_id from this point 6812 * onwards anymore, thus we should better reset it, 6813 * so that state pruning has chances to take effect. 6814 */ 6815 reg->id = 0; 6816 reg->ref_obj_id = 0; 6817 } else if (!reg_may_point_to_spin_lock(reg)) { 6818 /* For not-NULL ptr, reg->ref_obj_id will be reset 6819 * in release_reg_references(). 6820 * 6821 * reg->id is still used by spin_lock ptr. Other 6822 * than spin_lock ptr type, reg->id can be reset. 6823 */ 6824 reg->id = 0; 6825 } 6826 } 6827 } 6828 6829 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 6830 bool is_null) 6831 { 6832 struct bpf_reg_state *reg; 6833 int i; 6834 6835 for (i = 0; i < MAX_BPF_REG; i++) 6836 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 6837 6838 bpf_for_each_spilled_reg(i, state, reg) { 6839 if (!reg) 6840 continue; 6841 mark_ptr_or_null_reg(state, reg, id, is_null); 6842 } 6843 } 6844 6845 /* The logic is similar to find_good_pkt_pointers(), both could eventually 6846 * be folded together at some point. 6847 */ 6848 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 6849 bool is_null) 6850 { 6851 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 6852 struct bpf_reg_state *regs = state->regs; 6853 u32 ref_obj_id = regs[regno].ref_obj_id; 6854 u32 id = regs[regno].id; 6855 int i; 6856 6857 if (ref_obj_id && ref_obj_id == id && is_null) 6858 /* regs[regno] is in the " == NULL" branch. 6859 * No one could have freed the reference state before 6860 * doing the NULL check. 6861 */ 6862 WARN_ON_ONCE(release_reference_state(state, id)); 6863 6864 for (i = 0; i <= vstate->curframe; i++) 6865 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 6866 } 6867 6868 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 6869 struct bpf_reg_state *dst_reg, 6870 struct bpf_reg_state *src_reg, 6871 struct bpf_verifier_state *this_branch, 6872 struct bpf_verifier_state *other_branch) 6873 { 6874 if (BPF_SRC(insn->code) != BPF_X) 6875 return false; 6876 6877 /* Pointers are always 64-bit. */ 6878 if (BPF_CLASS(insn->code) == BPF_JMP32) 6879 return false; 6880 6881 switch (BPF_OP(insn->code)) { 6882 case BPF_JGT: 6883 if ((dst_reg->type == PTR_TO_PACKET && 6884 src_reg->type == PTR_TO_PACKET_END) || 6885 (dst_reg->type == PTR_TO_PACKET_META && 6886 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6887 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 6888 find_good_pkt_pointers(this_branch, dst_reg, 6889 dst_reg->type, false); 6890 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6891 src_reg->type == PTR_TO_PACKET) || 6892 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6893 src_reg->type == PTR_TO_PACKET_META)) { 6894 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 6895 find_good_pkt_pointers(other_branch, src_reg, 6896 src_reg->type, true); 6897 } else { 6898 return false; 6899 } 6900 break; 6901 case BPF_JLT: 6902 if ((dst_reg->type == PTR_TO_PACKET && 6903 src_reg->type == PTR_TO_PACKET_END) || 6904 (dst_reg->type == PTR_TO_PACKET_META && 6905 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6906 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 6907 find_good_pkt_pointers(other_branch, dst_reg, 6908 dst_reg->type, true); 6909 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6910 src_reg->type == PTR_TO_PACKET) || 6911 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6912 src_reg->type == PTR_TO_PACKET_META)) { 6913 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 6914 find_good_pkt_pointers(this_branch, src_reg, 6915 src_reg->type, false); 6916 } else { 6917 return false; 6918 } 6919 break; 6920 case BPF_JGE: 6921 if ((dst_reg->type == PTR_TO_PACKET && 6922 src_reg->type == PTR_TO_PACKET_END) || 6923 (dst_reg->type == PTR_TO_PACKET_META && 6924 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6925 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 6926 find_good_pkt_pointers(this_branch, dst_reg, 6927 dst_reg->type, true); 6928 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6929 src_reg->type == PTR_TO_PACKET) || 6930 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6931 src_reg->type == PTR_TO_PACKET_META)) { 6932 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 6933 find_good_pkt_pointers(other_branch, src_reg, 6934 src_reg->type, false); 6935 } else { 6936 return false; 6937 } 6938 break; 6939 case BPF_JLE: 6940 if ((dst_reg->type == PTR_TO_PACKET && 6941 src_reg->type == PTR_TO_PACKET_END) || 6942 (dst_reg->type == PTR_TO_PACKET_META && 6943 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6944 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 6945 find_good_pkt_pointers(other_branch, dst_reg, 6946 dst_reg->type, false); 6947 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6948 src_reg->type == PTR_TO_PACKET) || 6949 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6950 src_reg->type == PTR_TO_PACKET_META)) { 6951 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 6952 find_good_pkt_pointers(this_branch, src_reg, 6953 src_reg->type, true); 6954 } else { 6955 return false; 6956 } 6957 break; 6958 default: 6959 return false; 6960 } 6961 6962 return true; 6963 } 6964 6965 static int check_cond_jmp_op(struct bpf_verifier_env *env, 6966 struct bpf_insn *insn, int *insn_idx) 6967 { 6968 struct bpf_verifier_state *this_branch = env->cur_state; 6969 struct bpf_verifier_state *other_branch; 6970 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 6971 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 6972 u8 opcode = BPF_OP(insn->code); 6973 bool is_jmp32; 6974 int pred = -1; 6975 int err; 6976 6977 /* Only conditional jumps are expected to reach here. */ 6978 if (opcode == BPF_JA || opcode > BPF_JSLE) { 6979 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 6980 return -EINVAL; 6981 } 6982 6983 if (BPF_SRC(insn->code) == BPF_X) { 6984 if (insn->imm != 0) { 6985 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 6986 return -EINVAL; 6987 } 6988 6989 /* check src1 operand */ 6990 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6991 if (err) 6992 return err; 6993 6994 if (is_pointer_value(env, insn->src_reg)) { 6995 verbose(env, "R%d pointer comparison prohibited\n", 6996 insn->src_reg); 6997 return -EACCES; 6998 } 6999 src_reg = ®s[insn->src_reg]; 7000 } else { 7001 if (insn->src_reg != BPF_REG_0) { 7002 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 7003 return -EINVAL; 7004 } 7005 } 7006 7007 /* check src2 operand */ 7008 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7009 if (err) 7010 return err; 7011 7012 dst_reg = ®s[insn->dst_reg]; 7013 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 7014 7015 if (BPF_SRC(insn->code) == BPF_K) { 7016 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); 7017 } else if (src_reg->type == SCALAR_VALUE && 7018 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { 7019 pred = is_branch_taken(dst_reg, 7020 tnum_subreg(src_reg->var_off).value, 7021 opcode, 7022 is_jmp32); 7023 } else if (src_reg->type == SCALAR_VALUE && 7024 !is_jmp32 && tnum_is_const(src_reg->var_off)) { 7025 pred = is_branch_taken(dst_reg, 7026 src_reg->var_off.value, 7027 opcode, 7028 is_jmp32); 7029 } 7030 7031 if (pred >= 0) { 7032 /* If we get here with a dst_reg pointer type it is because 7033 * above is_branch_taken() special cased the 0 comparison. 7034 */ 7035 if (!__is_pointer_value(false, dst_reg)) 7036 err = mark_chain_precision(env, insn->dst_reg); 7037 if (BPF_SRC(insn->code) == BPF_X && !err) 7038 err = mark_chain_precision(env, insn->src_reg); 7039 if (err) 7040 return err; 7041 } 7042 if (pred == 1) { 7043 /* only follow the goto, ignore fall-through */ 7044 *insn_idx += insn->off; 7045 return 0; 7046 } else if (pred == 0) { 7047 /* only follow fall-through branch, since 7048 * that's where the program will go 7049 */ 7050 return 0; 7051 } 7052 7053 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 7054 false); 7055 if (!other_branch) 7056 return -EFAULT; 7057 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 7058 7059 /* detect if we are comparing against a constant value so we can adjust 7060 * our min/max values for our dst register. 7061 * this is only legit if both are scalars (or pointers to the same 7062 * object, I suppose, but we don't support that right now), because 7063 * otherwise the different base pointers mean the offsets aren't 7064 * comparable. 7065 */ 7066 if (BPF_SRC(insn->code) == BPF_X) { 7067 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 7068 7069 if (dst_reg->type == SCALAR_VALUE && 7070 src_reg->type == SCALAR_VALUE) { 7071 if (tnum_is_const(src_reg->var_off) || 7072 (is_jmp32 && 7073 tnum_is_const(tnum_subreg(src_reg->var_off)))) 7074 reg_set_min_max(&other_branch_regs[insn->dst_reg], 7075 dst_reg, 7076 src_reg->var_off.value, 7077 tnum_subreg(src_reg->var_off).value, 7078 opcode, is_jmp32); 7079 else if (tnum_is_const(dst_reg->var_off) || 7080 (is_jmp32 && 7081 tnum_is_const(tnum_subreg(dst_reg->var_off)))) 7082 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 7083 src_reg, 7084 dst_reg->var_off.value, 7085 tnum_subreg(dst_reg->var_off).value, 7086 opcode, is_jmp32); 7087 else if (!is_jmp32 && 7088 (opcode == BPF_JEQ || opcode == BPF_JNE)) 7089 /* Comparing for equality, we can combine knowledge */ 7090 reg_combine_min_max(&other_branch_regs[insn->src_reg], 7091 &other_branch_regs[insn->dst_reg], 7092 src_reg, dst_reg, opcode); 7093 } 7094 } else if (dst_reg->type == SCALAR_VALUE) { 7095 reg_set_min_max(&other_branch_regs[insn->dst_reg], 7096 dst_reg, insn->imm, (u32)insn->imm, 7097 opcode, is_jmp32); 7098 } 7099 7100 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 7101 * NOTE: these optimizations below are related with pointer comparison 7102 * which will never be JMP32. 7103 */ 7104 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 7105 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 7106 reg_type_may_be_null(dst_reg->type)) { 7107 /* Mark all identical registers in each branch as either 7108 * safe or unknown depending R == 0 or R != 0 conditional. 7109 */ 7110 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 7111 opcode == BPF_JNE); 7112 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 7113 opcode == BPF_JEQ); 7114 } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], 7115 this_branch, other_branch) && 7116 is_pointer_value(env, insn->dst_reg)) { 7117 verbose(env, "R%d pointer comparison prohibited\n", 7118 insn->dst_reg); 7119 return -EACCES; 7120 } 7121 if (env->log.level & BPF_LOG_LEVEL) 7122 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 7123 return 0; 7124 } 7125 7126 /* verify BPF_LD_IMM64 instruction */ 7127 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 7128 { 7129 struct bpf_insn_aux_data *aux = cur_aux(env); 7130 struct bpf_reg_state *regs = cur_regs(env); 7131 struct bpf_map *map; 7132 int err; 7133 7134 if (BPF_SIZE(insn->code) != BPF_DW) { 7135 verbose(env, "invalid BPF_LD_IMM insn\n"); 7136 return -EINVAL; 7137 } 7138 if (insn->off != 0) { 7139 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 7140 return -EINVAL; 7141 } 7142 7143 err = check_reg_arg(env, insn->dst_reg, DST_OP); 7144 if (err) 7145 return err; 7146 7147 if (insn->src_reg == 0) { 7148 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 7149 7150 regs[insn->dst_reg].type = SCALAR_VALUE; 7151 __mark_reg_known(®s[insn->dst_reg], imm); 7152 return 0; 7153 } 7154 7155 map = env->used_maps[aux->map_index]; 7156 mark_reg_known_zero(env, regs, insn->dst_reg); 7157 regs[insn->dst_reg].map_ptr = map; 7158 7159 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { 7160 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; 7161 regs[insn->dst_reg].off = aux->map_off; 7162 if (map_value_has_spin_lock(map)) 7163 regs[insn->dst_reg].id = ++env->id_gen; 7164 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 7165 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 7166 } else { 7167 verbose(env, "bpf verifier is misconfigured\n"); 7168 return -EINVAL; 7169 } 7170 7171 return 0; 7172 } 7173 7174 static bool may_access_skb(enum bpf_prog_type type) 7175 { 7176 switch (type) { 7177 case BPF_PROG_TYPE_SOCKET_FILTER: 7178 case BPF_PROG_TYPE_SCHED_CLS: 7179 case BPF_PROG_TYPE_SCHED_ACT: 7180 return true; 7181 default: 7182 return false; 7183 } 7184 } 7185 7186 /* verify safety of LD_ABS|LD_IND instructions: 7187 * - they can only appear in the programs where ctx == skb 7188 * - since they are wrappers of function calls, they scratch R1-R5 registers, 7189 * preserve R6-R9, and store return value into R0 7190 * 7191 * Implicit input: 7192 * ctx == skb == R6 == CTX 7193 * 7194 * Explicit input: 7195 * SRC == any register 7196 * IMM == 32-bit immediate 7197 * 7198 * Output: 7199 * R0 - 8/16/32-bit skb data converted to cpu endianness 7200 */ 7201 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 7202 { 7203 struct bpf_reg_state *regs = cur_regs(env); 7204 static const int ctx_reg = BPF_REG_6; 7205 u8 mode = BPF_MODE(insn->code); 7206 int i, err; 7207 7208 if (!may_access_skb(env->prog->type)) { 7209 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 7210 return -EINVAL; 7211 } 7212 7213 if (!env->ops->gen_ld_abs) { 7214 verbose(env, "bpf verifier is misconfigured\n"); 7215 return -EINVAL; 7216 } 7217 7218 if (env->subprog_cnt > 1) { 7219 /* when program has LD_ABS insn JITs and interpreter assume 7220 * that r1 == ctx == skb which is not the case for callees 7221 * that can have arbitrary arguments. It's problematic 7222 * for main prog as well since JITs would need to analyze 7223 * all functions in order to make proper register save/restore 7224 * decisions in the main prog. Hence disallow LD_ABS with calls 7225 */ 7226 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); 7227 return -EINVAL; 7228 } 7229 7230 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 7231 BPF_SIZE(insn->code) == BPF_DW || 7232 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 7233 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 7234 return -EINVAL; 7235 } 7236 7237 /* check whether implicit source operand (register R6) is readable */ 7238 err = check_reg_arg(env, ctx_reg, SRC_OP); 7239 if (err) 7240 return err; 7241 7242 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 7243 * gen_ld_abs() may terminate the program at runtime, leading to 7244 * reference leak. 7245 */ 7246 err = check_reference_leak(env); 7247 if (err) { 7248 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 7249 return err; 7250 } 7251 7252 if (env->cur_state->active_spin_lock) { 7253 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 7254 return -EINVAL; 7255 } 7256 7257 if (regs[ctx_reg].type != PTR_TO_CTX) { 7258 verbose(env, 7259 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 7260 return -EINVAL; 7261 } 7262 7263 if (mode == BPF_IND) { 7264 /* check explicit source operand */ 7265 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7266 if (err) 7267 return err; 7268 } 7269 7270 err = check_ctx_reg(env, ®s[ctx_reg], ctx_reg); 7271 if (err < 0) 7272 return err; 7273 7274 /* reset caller saved regs to unreadable */ 7275 for (i = 0; i < CALLER_SAVED_REGS; i++) { 7276 mark_reg_not_init(env, regs, caller_saved[i]); 7277 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 7278 } 7279 7280 /* mark destination R0 register as readable, since it contains 7281 * the value fetched from the packet. 7282 * Already marked as written above. 7283 */ 7284 mark_reg_unknown(env, regs, BPF_REG_0); 7285 /* ld_abs load up to 32-bit skb data. */ 7286 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 7287 return 0; 7288 } 7289 7290 static int check_return_code(struct bpf_verifier_env *env) 7291 { 7292 struct tnum enforce_attach_type_range = tnum_unknown; 7293 const struct bpf_prog *prog = env->prog; 7294 struct bpf_reg_state *reg; 7295 struct tnum range = tnum_range(0, 1); 7296 int err; 7297 7298 /* LSM and struct_ops func-ptr's return type could be "void" */ 7299 if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS || 7300 env->prog->type == BPF_PROG_TYPE_LSM) && 7301 !prog->aux->attach_func_proto->type) 7302 return 0; 7303 7304 /* eBPF calling convetion is such that R0 is used 7305 * to return the value from eBPF program. 7306 * Make sure that it's readable at this time 7307 * of bpf_exit, which means that program wrote 7308 * something into it earlier 7309 */ 7310 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 7311 if (err) 7312 return err; 7313 7314 if (is_pointer_value(env, BPF_REG_0)) { 7315 verbose(env, "R0 leaks addr as return value\n"); 7316 return -EACCES; 7317 } 7318 7319 switch (env->prog->type) { 7320 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 7321 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 7322 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 7323 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 7324 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 7325 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 7326 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) 7327 range = tnum_range(1, 1); 7328 break; 7329 case BPF_PROG_TYPE_CGROUP_SKB: 7330 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 7331 range = tnum_range(0, 3); 7332 enforce_attach_type_range = tnum_range(2, 3); 7333 } 7334 break; 7335 case BPF_PROG_TYPE_CGROUP_SOCK: 7336 case BPF_PROG_TYPE_SOCK_OPS: 7337 case BPF_PROG_TYPE_CGROUP_DEVICE: 7338 case BPF_PROG_TYPE_CGROUP_SYSCTL: 7339 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 7340 break; 7341 case BPF_PROG_TYPE_RAW_TRACEPOINT: 7342 if (!env->prog->aux->attach_btf_id) 7343 return 0; 7344 range = tnum_const(0); 7345 break; 7346 case BPF_PROG_TYPE_TRACING: 7347 switch (env->prog->expected_attach_type) { 7348 case BPF_TRACE_FENTRY: 7349 case BPF_TRACE_FEXIT: 7350 range = tnum_const(0); 7351 break; 7352 case BPF_TRACE_RAW_TP: 7353 case BPF_MODIFY_RETURN: 7354 return 0; 7355 case BPF_TRACE_ITER: 7356 break; 7357 default: 7358 return -ENOTSUPP; 7359 } 7360 break; 7361 case BPF_PROG_TYPE_SK_LOOKUP: 7362 range = tnum_range(SK_DROP, SK_PASS); 7363 break; 7364 case BPF_PROG_TYPE_EXT: 7365 /* freplace program can return anything as its return value 7366 * depends on the to-be-replaced kernel func or bpf program. 7367 */ 7368 default: 7369 return 0; 7370 } 7371 7372 reg = cur_regs(env) + BPF_REG_0; 7373 if (reg->type != SCALAR_VALUE) { 7374 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 7375 reg_type_str[reg->type]); 7376 return -EINVAL; 7377 } 7378 7379 if (!tnum_in(range, reg->var_off)) { 7380 char tn_buf[48]; 7381 7382 verbose(env, "At program exit the register R0 "); 7383 if (!tnum_is_unknown(reg->var_off)) { 7384 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 7385 verbose(env, "has value %s", tn_buf); 7386 } else { 7387 verbose(env, "has unknown scalar value"); 7388 } 7389 tnum_strn(tn_buf, sizeof(tn_buf), range); 7390 verbose(env, " should have been in %s\n", tn_buf); 7391 return -EINVAL; 7392 } 7393 7394 if (!tnum_is_unknown(enforce_attach_type_range) && 7395 tnum_in(enforce_attach_type_range, reg->var_off)) 7396 env->prog->enforce_expected_attach_type = 1; 7397 return 0; 7398 } 7399 7400 /* non-recursive DFS pseudo code 7401 * 1 procedure DFS-iterative(G,v): 7402 * 2 label v as discovered 7403 * 3 let S be a stack 7404 * 4 S.push(v) 7405 * 5 while S is not empty 7406 * 6 t <- S.pop() 7407 * 7 if t is what we're looking for: 7408 * 8 return t 7409 * 9 for all edges e in G.adjacentEdges(t) do 7410 * 10 if edge e is already labelled 7411 * 11 continue with the next edge 7412 * 12 w <- G.adjacentVertex(t,e) 7413 * 13 if vertex w is not discovered and not explored 7414 * 14 label e as tree-edge 7415 * 15 label w as discovered 7416 * 16 S.push(w) 7417 * 17 continue at 5 7418 * 18 else if vertex w is discovered 7419 * 19 label e as back-edge 7420 * 20 else 7421 * 21 // vertex w is explored 7422 * 22 label e as forward- or cross-edge 7423 * 23 label t as explored 7424 * 24 S.pop() 7425 * 7426 * convention: 7427 * 0x10 - discovered 7428 * 0x11 - discovered and fall-through edge labelled 7429 * 0x12 - discovered and fall-through and branch edges labelled 7430 * 0x20 - explored 7431 */ 7432 7433 enum { 7434 DISCOVERED = 0x10, 7435 EXPLORED = 0x20, 7436 FALLTHROUGH = 1, 7437 BRANCH = 2, 7438 }; 7439 7440 static u32 state_htab_size(struct bpf_verifier_env *env) 7441 { 7442 return env->prog->len; 7443 } 7444 7445 static struct bpf_verifier_state_list **explored_state( 7446 struct bpf_verifier_env *env, 7447 int idx) 7448 { 7449 struct bpf_verifier_state *cur = env->cur_state; 7450 struct bpf_func_state *state = cur->frame[cur->curframe]; 7451 7452 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 7453 } 7454 7455 static void init_explored_state(struct bpf_verifier_env *env, int idx) 7456 { 7457 env->insn_aux_data[idx].prune_point = true; 7458 } 7459 7460 /* t, w, e - match pseudo-code above: 7461 * t - index of current instruction 7462 * w - next instruction 7463 * e - edge 7464 */ 7465 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 7466 bool loop_ok) 7467 { 7468 int *insn_stack = env->cfg.insn_stack; 7469 int *insn_state = env->cfg.insn_state; 7470 7471 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 7472 return 0; 7473 7474 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 7475 return 0; 7476 7477 if (w < 0 || w >= env->prog->len) { 7478 verbose_linfo(env, t, "%d: ", t); 7479 verbose(env, "jump out of range from insn %d to %d\n", t, w); 7480 return -EINVAL; 7481 } 7482 7483 if (e == BRANCH) 7484 /* mark branch target for state pruning */ 7485 init_explored_state(env, w); 7486 7487 if (insn_state[w] == 0) { 7488 /* tree-edge */ 7489 insn_state[t] = DISCOVERED | e; 7490 insn_state[w] = DISCOVERED; 7491 if (env->cfg.cur_stack >= env->prog->len) 7492 return -E2BIG; 7493 insn_stack[env->cfg.cur_stack++] = w; 7494 return 1; 7495 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 7496 if (loop_ok && env->bpf_capable) 7497 return 0; 7498 verbose_linfo(env, t, "%d: ", t); 7499 verbose_linfo(env, w, "%d: ", w); 7500 verbose(env, "back-edge from insn %d to %d\n", t, w); 7501 return -EINVAL; 7502 } else if (insn_state[w] == EXPLORED) { 7503 /* forward- or cross-edge */ 7504 insn_state[t] = DISCOVERED | e; 7505 } else { 7506 verbose(env, "insn state internal bug\n"); 7507 return -EFAULT; 7508 } 7509 return 0; 7510 } 7511 7512 /* non-recursive depth-first-search to detect loops in BPF program 7513 * loop == back-edge in directed graph 7514 */ 7515 static int check_cfg(struct bpf_verifier_env *env) 7516 { 7517 struct bpf_insn *insns = env->prog->insnsi; 7518 int insn_cnt = env->prog->len; 7519 int *insn_stack, *insn_state; 7520 int ret = 0; 7521 int i, t; 7522 7523 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 7524 if (!insn_state) 7525 return -ENOMEM; 7526 7527 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 7528 if (!insn_stack) { 7529 kvfree(insn_state); 7530 return -ENOMEM; 7531 } 7532 7533 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 7534 insn_stack[0] = 0; /* 0 is the first instruction */ 7535 env->cfg.cur_stack = 1; 7536 7537 peek_stack: 7538 if (env->cfg.cur_stack == 0) 7539 goto check_state; 7540 t = insn_stack[env->cfg.cur_stack - 1]; 7541 7542 if (BPF_CLASS(insns[t].code) == BPF_JMP || 7543 BPF_CLASS(insns[t].code) == BPF_JMP32) { 7544 u8 opcode = BPF_OP(insns[t].code); 7545 7546 if (opcode == BPF_EXIT) { 7547 goto mark_explored; 7548 } else if (opcode == BPF_CALL) { 7549 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 7550 if (ret == 1) 7551 goto peek_stack; 7552 else if (ret < 0) 7553 goto err_free; 7554 if (t + 1 < insn_cnt) 7555 init_explored_state(env, t + 1); 7556 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 7557 init_explored_state(env, t); 7558 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, 7559 env, false); 7560 if (ret == 1) 7561 goto peek_stack; 7562 else if (ret < 0) 7563 goto err_free; 7564 } 7565 } else if (opcode == BPF_JA) { 7566 if (BPF_SRC(insns[t].code) != BPF_K) { 7567 ret = -EINVAL; 7568 goto err_free; 7569 } 7570 /* unconditional jump with single edge */ 7571 ret = push_insn(t, t + insns[t].off + 1, 7572 FALLTHROUGH, env, true); 7573 if (ret == 1) 7574 goto peek_stack; 7575 else if (ret < 0) 7576 goto err_free; 7577 /* unconditional jmp is not a good pruning point, 7578 * but it's marked, since backtracking needs 7579 * to record jmp history in is_state_visited(). 7580 */ 7581 init_explored_state(env, t + insns[t].off + 1); 7582 /* tell verifier to check for equivalent states 7583 * after every call and jump 7584 */ 7585 if (t + 1 < insn_cnt) 7586 init_explored_state(env, t + 1); 7587 } else { 7588 /* conditional jump with two edges */ 7589 init_explored_state(env, t); 7590 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 7591 if (ret == 1) 7592 goto peek_stack; 7593 else if (ret < 0) 7594 goto err_free; 7595 7596 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 7597 if (ret == 1) 7598 goto peek_stack; 7599 else if (ret < 0) 7600 goto err_free; 7601 } 7602 } else { 7603 /* all other non-branch instructions with single 7604 * fall-through edge 7605 */ 7606 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 7607 if (ret == 1) 7608 goto peek_stack; 7609 else if (ret < 0) 7610 goto err_free; 7611 } 7612 7613 mark_explored: 7614 insn_state[t] = EXPLORED; 7615 if (env->cfg.cur_stack-- <= 0) { 7616 verbose(env, "pop stack internal bug\n"); 7617 ret = -EFAULT; 7618 goto err_free; 7619 } 7620 goto peek_stack; 7621 7622 check_state: 7623 for (i = 0; i < insn_cnt; i++) { 7624 if (insn_state[i] != EXPLORED) { 7625 verbose(env, "unreachable insn %d\n", i); 7626 ret = -EINVAL; 7627 goto err_free; 7628 } 7629 } 7630 ret = 0; /* cfg looks good */ 7631 7632 err_free: 7633 kvfree(insn_state); 7634 kvfree(insn_stack); 7635 env->cfg.insn_state = env->cfg.insn_stack = NULL; 7636 return ret; 7637 } 7638 7639 /* The minimum supported BTF func info size */ 7640 #define MIN_BPF_FUNCINFO_SIZE 8 7641 #define MAX_FUNCINFO_REC_SIZE 252 7642 7643 static int check_btf_func(struct bpf_verifier_env *env, 7644 const union bpf_attr *attr, 7645 union bpf_attr __user *uattr) 7646 { 7647 u32 i, nfuncs, urec_size, min_size; 7648 u32 krec_size = sizeof(struct bpf_func_info); 7649 struct bpf_func_info *krecord; 7650 struct bpf_func_info_aux *info_aux = NULL; 7651 const struct btf_type *type; 7652 struct bpf_prog *prog; 7653 const struct btf *btf; 7654 void __user *urecord; 7655 u32 prev_offset = 0; 7656 int ret = -ENOMEM; 7657 7658 nfuncs = attr->func_info_cnt; 7659 if (!nfuncs) 7660 return 0; 7661 7662 if (nfuncs != env->subprog_cnt) { 7663 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 7664 return -EINVAL; 7665 } 7666 7667 urec_size = attr->func_info_rec_size; 7668 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 7669 urec_size > MAX_FUNCINFO_REC_SIZE || 7670 urec_size % sizeof(u32)) { 7671 verbose(env, "invalid func info rec size %u\n", urec_size); 7672 return -EINVAL; 7673 } 7674 7675 prog = env->prog; 7676 btf = prog->aux->btf; 7677 7678 urecord = u64_to_user_ptr(attr->func_info); 7679 min_size = min_t(u32, krec_size, urec_size); 7680 7681 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 7682 if (!krecord) 7683 return -ENOMEM; 7684 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 7685 if (!info_aux) 7686 goto err_free; 7687 7688 for (i = 0; i < nfuncs; i++) { 7689 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 7690 if (ret) { 7691 if (ret == -E2BIG) { 7692 verbose(env, "nonzero tailing record in func info"); 7693 /* set the size kernel expects so loader can zero 7694 * out the rest of the record. 7695 */ 7696 if (put_user(min_size, &uattr->func_info_rec_size)) 7697 ret = -EFAULT; 7698 } 7699 goto err_free; 7700 } 7701 7702 if (copy_from_user(&krecord[i], urecord, min_size)) { 7703 ret = -EFAULT; 7704 goto err_free; 7705 } 7706 7707 /* check insn_off */ 7708 if (i == 0) { 7709 if (krecord[i].insn_off) { 7710 verbose(env, 7711 "nonzero insn_off %u for the first func info record", 7712 krecord[i].insn_off); 7713 ret = -EINVAL; 7714 goto err_free; 7715 } 7716 } else if (krecord[i].insn_off <= prev_offset) { 7717 verbose(env, 7718 "same or smaller insn offset (%u) than previous func info record (%u)", 7719 krecord[i].insn_off, prev_offset); 7720 ret = -EINVAL; 7721 goto err_free; 7722 } 7723 7724 if (env->subprog_info[i].start != krecord[i].insn_off) { 7725 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 7726 ret = -EINVAL; 7727 goto err_free; 7728 } 7729 7730 /* check type_id */ 7731 type = btf_type_by_id(btf, krecord[i].type_id); 7732 if (!type || !btf_type_is_func(type)) { 7733 verbose(env, "invalid type id %d in func info", 7734 krecord[i].type_id); 7735 ret = -EINVAL; 7736 goto err_free; 7737 } 7738 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 7739 prev_offset = krecord[i].insn_off; 7740 urecord += urec_size; 7741 } 7742 7743 prog->aux->func_info = krecord; 7744 prog->aux->func_info_cnt = nfuncs; 7745 prog->aux->func_info_aux = info_aux; 7746 return 0; 7747 7748 err_free: 7749 kvfree(krecord); 7750 kfree(info_aux); 7751 return ret; 7752 } 7753 7754 static void adjust_btf_func(struct bpf_verifier_env *env) 7755 { 7756 struct bpf_prog_aux *aux = env->prog->aux; 7757 int i; 7758 7759 if (!aux->func_info) 7760 return; 7761 7762 for (i = 0; i < env->subprog_cnt; i++) 7763 aux->func_info[i].insn_off = env->subprog_info[i].start; 7764 } 7765 7766 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ 7767 sizeof(((struct bpf_line_info *)(0))->line_col)) 7768 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 7769 7770 static int check_btf_line(struct bpf_verifier_env *env, 7771 const union bpf_attr *attr, 7772 union bpf_attr __user *uattr) 7773 { 7774 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 7775 struct bpf_subprog_info *sub; 7776 struct bpf_line_info *linfo; 7777 struct bpf_prog *prog; 7778 const struct btf *btf; 7779 void __user *ulinfo; 7780 int err; 7781 7782 nr_linfo = attr->line_info_cnt; 7783 if (!nr_linfo) 7784 return 0; 7785 7786 rec_size = attr->line_info_rec_size; 7787 if (rec_size < MIN_BPF_LINEINFO_SIZE || 7788 rec_size > MAX_LINEINFO_REC_SIZE || 7789 rec_size & (sizeof(u32) - 1)) 7790 return -EINVAL; 7791 7792 /* Need to zero it in case the userspace may 7793 * pass in a smaller bpf_line_info object. 7794 */ 7795 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 7796 GFP_KERNEL | __GFP_NOWARN); 7797 if (!linfo) 7798 return -ENOMEM; 7799 7800 prog = env->prog; 7801 btf = prog->aux->btf; 7802 7803 s = 0; 7804 sub = env->subprog_info; 7805 ulinfo = u64_to_user_ptr(attr->line_info); 7806 expected_size = sizeof(struct bpf_line_info); 7807 ncopy = min_t(u32, expected_size, rec_size); 7808 for (i = 0; i < nr_linfo; i++) { 7809 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 7810 if (err) { 7811 if (err == -E2BIG) { 7812 verbose(env, "nonzero tailing record in line_info"); 7813 if (put_user(expected_size, 7814 &uattr->line_info_rec_size)) 7815 err = -EFAULT; 7816 } 7817 goto err_free; 7818 } 7819 7820 if (copy_from_user(&linfo[i], ulinfo, ncopy)) { 7821 err = -EFAULT; 7822 goto err_free; 7823 } 7824 7825 /* 7826 * Check insn_off to ensure 7827 * 1) strictly increasing AND 7828 * 2) bounded by prog->len 7829 * 7830 * The linfo[0].insn_off == 0 check logically falls into 7831 * the later "missing bpf_line_info for func..." case 7832 * because the first linfo[0].insn_off must be the 7833 * first sub also and the first sub must have 7834 * subprog_info[0].start == 0. 7835 */ 7836 if ((i && linfo[i].insn_off <= prev_offset) || 7837 linfo[i].insn_off >= prog->len) { 7838 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 7839 i, linfo[i].insn_off, prev_offset, 7840 prog->len); 7841 err = -EINVAL; 7842 goto err_free; 7843 } 7844 7845 if (!prog->insnsi[linfo[i].insn_off].code) { 7846 verbose(env, 7847 "Invalid insn code at line_info[%u].insn_off\n", 7848 i); 7849 err = -EINVAL; 7850 goto err_free; 7851 } 7852 7853 if (!btf_name_by_offset(btf, linfo[i].line_off) || 7854 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 7855 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 7856 err = -EINVAL; 7857 goto err_free; 7858 } 7859 7860 if (s != env->subprog_cnt) { 7861 if (linfo[i].insn_off == sub[s].start) { 7862 sub[s].linfo_idx = i; 7863 s++; 7864 } else if (sub[s].start < linfo[i].insn_off) { 7865 verbose(env, "missing bpf_line_info for func#%u\n", s); 7866 err = -EINVAL; 7867 goto err_free; 7868 } 7869 } 7870 7871 prev_offset = linfo[i].insn_off; 7872 ulinfo += rec_size; 7873 } 7874 7875 if (s != env->subprog_cnt) { 7876 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 7877 env->subprog_cnt - s, s); 7878 err = -EINVAL; 7879 goto err_free; 7880 } 7881 7882 prog->aux->linfo = linfo; 7883 prog->aux->nr_linfo = nr_linfo; 7884 7885 return 0; 7886 7887 err_free: 7888 kvfree(linfo); 7889 return err; 7890 } 7891 7892 static int check_btf_info(struct bpf_verifier_env *env, 7893 const union bpf_attr *attr, 7894 union bpf_attr __user *uattr) 7895 { 7896 struct btf *btf; 7897 int err; 7898 7899 if (!attr->func_info_cnt && !attr->line_info_cnt) 7900 return 0; 7901 7902 btf = btf_get_by_fd(attr->prog_btf_fd); 7903 if (IS_ERR(btf)) 7904 return PTR_ERR(btf); 7905 env->prog->aux->btf = btf; 7906 7907 err = check_btf_func(env, attr, uattr); 7908 if (err) 7909 return err; 7910 7911 err = check_btf_line(env, attr, uattr); 7912 if (err) 7913 return err; 7914 7915 return 0; 7916 } 7917 7918 /* check %cur's range satisfies %old's */ 7919 static bool range_within(struct bpf_reg_state *old, 7920 struct bpf_reg_state *cur) 7921 { 7922 return old->umin_value <= cur->umin_value && 7923 old->umax_value >= cur->umax_value && 7924 old->smin_value <= cur->smin_value && 7925 old->smax_value >= cur->smax_value; 7926 } 7927 7928 /* Maximum number of register states that can exist at once */ 7929 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 7930 struct idpair { 7931 u32 old; 7932 u32 cur; 7933 }; 7934 7935 /* If in the old state two registers had the same id, then they need to have 7936 * the same id in the new state as well. But that id could be different from 7937 * the old state, so we need to track the mapping from old to new ids. 7938 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 7939 * regs with old id 5 must also have new id 9 for the new state to be safe. But 7940 * regs with a different old id could still have new id 9, we don't care about 7941 * that. 7942 * So we look through our idmap to see if this old id has been seen before. If 7943 * so, we require the new id to match; otherwise, we add the id pair to the map. 7944 */ 7945 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 7946 { 7947 unsigned int i; 7948 7949 for (i = 0; i < ID_MAP_SIZE; i++) { 7950 if (!idmap[i].old) { 7951 /* Reached an empty slot; haven't seen this id before */ 7952 idmap[i].old = old_id; 7953 idmap[i].cur = cur_id; 7954 return true; 7955 } 7956 if (idmap[i].old == old_id) 7957 return idmap[i].cur == cur_id; 7958 } 7959 /* We ran out of idmap slots, which should be impossible */ 7960 WARN_ON_ONCE(1); 7961 return false; 7962 } 7963 7964 static void clean_func_state(struct bpf_verifier_env *env, 7965 struct bpf_func_state *st) 7966 { 7967 enum bpf_reg_liveness live; 7968 int i, j; 7969 7970 for (i = 0; i < BPF_REG_FP; i++) { 7971 live = st->regs[i].live; 7972 /* liveness must not touch this register anymore */ 7973 st->regs[i].live |= REG_LIVE_DONE; 7974 if (!(live & REG_LIVE_READ)) 7975 /* since the register is unused, clear its state 7976 * to make further comparison simpler 7977 */ 7978 __mark_reg_not_init(env, &st->regs[i]); 7979 } 7980 7981 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 7982 live = st->stack[i].spilled_ptr.live; 7983 /* liveness must not touch this stack slot anymore */ 7984 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 7985 if (!(live & REG_LIVE_READ)) { 7986 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 7987 for (j = 0; j < BPF_REG_SIZE; j++) 7988 st->stack[i].slot_type[j] = STACK_INVALID; 7989 } 7990 } 7991 } 7992 7993 static void clean_verifier_state(struct bpf_verifier_env *env, 7994 struct bpf_verifier_state *st) 7995 { 7996 int i; 7997 7998 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 7999 /* all regs in this state in all frames were already marked */ 8000 return; 8001 8002 for (i = 0; i <= st->curframe; i++) 8003 clean_func_state(env, st->frame[i]); 8004 } 8005 8006 /* the parentage chains form a tree. 8007 * the verifier states are added to state lists at given insn and 8008 * pushed into state stack for future exploration. 8009 * when the verifier reaches bpf_exit insn some of the verifer states 8010 * stored in the state lists have their final liveness state already, 8011 * but a lot of states will get revised from liveness point of view when 8012 * the verifier explores other branches. 8013 * Example: 8014 * 1: r0 = 1 8015 * 2: if r1 == 100 goto pc+1 8016 * 3: r0 = 2 8017 * 4: exit 8018 * when the verifier reaches exit insn the register r0 in the state list of 8019 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 8020 * of insn 2 and goes exploring further. At the insn 4 it will walk the 8021 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 8022 * 8023 * Since the verifier pushes the branch states as it sees them while exploring 8024 * the program the condition of walking the branch instruction for the second 8025 * time means that all states below this branch were already explored and 8026 * their final liveness markes are already propagated. 8027 * Hence when the verifier completes the search of state list in is_state_visited() 8028 * we can call this clean_live_states() function to mark all liveness states 8029 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 8030 * will not be used. 8031 * This function also clears the registers and stack for states that !READ 8032 * to simplify state merging. 8033 * 8034 * Important note here that walking the same branch instruction in the callee 8035 * doesn't meant that the states are DONE. The verifier has to compare 8036 * the callsites 8037 */ 8038 static void clean_live_states(struct bpf_verifier_env *env, int insn, 8039 struct bpf_verifier_state *cur) 8040 { 8041 struct bpf_verifier_state_list *sl; 8042 int i; 8043 8044 sl = *explored_state(env, insn); 8045 while (sl) { 8046 if (sl->state.branches) 8047 goto next; 8048 if (sl->state.insn_idx != insn || 8049 sl->state.curframe != cur->curframe) 8050 goto next; 8051 for (i = 0; i <= cur->curframe; i++) 8052 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 8053 goto next; 8054 clean_verifier_state(env, &sl->state); 8055 next: 8056 sl = sl->next; 8057 } 8058 } 8059 8060 /* Returns true if (rold safe implies rcur safe) */ 8061 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 8062 struct idpair *idmap) 8063 { 8064 bool equal; 8065 8066 if (!(rold->live & REG_LIVE_READ)) 8067 /* explored state didn't use this */ 8068 return true; 8069 8070 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; 8071 8072 if (rold->type == PTR_TO_STACK) 8073 /* two stack pointers are equal only if they're pointing to 8074 * the same stack frame, since fp-8 in foo != fp-8 in bar 8075 */ 8076 return equal && rold->frameno == rcur->frameno; 8077 8078 if (equal) 8079 return true; 8080 8081 if (rold->type == NOT_INIT) 8082 /* explored state can't have used this */ 8083 return true; 8084 if (rcur->type == NOT_INIT) 8085 return false; 8086 switch (rold->type) { 8087 case SCALAR_VALUE: 8088 if (rcur->type == SCALAR_VALUE) { 8089 if (!rold->precise && !rcur->precise) 8090 return true; 8091 /* new val must satisfy old val knowledge */ 8092 return range_within(rold, rcur) && 8093 tnum_in(rold->var_off, rcur->var_off); 8094 } else { 8095 /* We're trying to use a pointer in place of a scalar. 8096 * Even if the scalar was unbounded, this could lead to 8097 * pointer leaks because scalars are allowed to leak 8098 * while pointers are not. We could make this safe in 8099 * special cases if root is calling us, but it's 8100 * probably not worth the hassle. 8101 */ 8102 return false; 8103 } 8104 case PTR_TO_MAP_VALUE: 8105 /* If the new min/max/var_off satisfy the old ones and 8106 * everything else matches, we are OK. 8107 * 'id' is not compared, since it's only used for maps with 8108 * bpf_spin_lock inside map element and in such cases if 8109 * the rest of the prog is valid for one map element then 8110 * it's valid for all map elements regardless of the key 8111 * used in bpf_map_lookup() 8112 */ 8113 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 8114 range_within(rold, rcur) && 8115 tnum_in(rold->var_off, rcur->var_off); 8116 case PTR_TO_MAP_VALUE_OR_NULL: 8117 /* a PTR_TO_MAP_VALUE could be safe to use as a 8118 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 8119 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 8120 * checked, doing so could have affected others with the same 8121 * id, and we can't check for that because we lost the id when 8122 * we converted to a PTR_TO_MAP_VALUE. 8123 */ 8124 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 8125 return false; 8126 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 8127 return false; 8128 /* Check our ids match any regs they're supposed to */ 8129 return check_ids(rold->id, rcur->id, idmap); 8130 case PTR_TO_PACKET_META: 8131 case PTR_TO_PACKET: 8132 if (rcur->type != rold->type) 8133 return false; 8134 /* We must have at least as much range as the old ptr 8135 * did, so that any accesses which were safe before are 8136 * still safe. This is true even if old range < old off, 8137 * since someone could have accessed through (ptr - k), or 8138 * even done ptr -= k in a register, to get a safe access. 8139 */ 8140 if (rold->range > rcur->range) 8141 return false; 8142 /* If the offsets don't match, we can't trust our alignment; 8143 * nor can we be sure that we won't fall out of range. 8144 */ 8145 if (rold->off != rcur->off) 8146 return false; 8147 /* id relations must be preserved */ 8148 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 8149 return false; 8150 /* new val must satisfy old val knowledge */ 8151 return range_within(rold, rcur) && 8152 tnum_in(rold->var_off, rcur->var_off); 8153 case PTR_TO_CTX: 8154 case CONST_PTR_TO_MAP: 8155 case PTR_TO_PACKET_END: 8156 case PTR_TO_FLOW_KEYS: 8157 case PTR_TO_SOCKET: 8158 case PTR_TO_SOCKET_OR_NULL: 8159 case PTR_TO_SOCK_COMMON: 8160 case PTR_TO_SOCK_COMMON_OR_NULL: 8161 case PTR_TO_TCP_SOCK: 8162 case PTR_TO_TCP_SOCK_OR_NULL: 8163 case PTR_TO_XDP_SOCK: 8164 /* Only valid matches are exact, which memcmp() above 8165 * would have accepted 8166 */ 8167 default: 8168 /* Don't know what's going on, just say it's not safe */ 8169 return false; 8170 } 8171 8172 /* Shouldn't get here; if we do, say it's not safe */ 8173 WARN_ON_ONCE(1); 8174 return false; 8175 } 8176 8177 static bool stacksafe(struct bpf_func_state *old, 8178 struct bpf_func_state *cur, 8179 struct idpair *idmap) 8180 { 8181 int i, spi; 8182 8183 /* walk slots of the explored stack and ignore any additional 8184 * slots in the current stack, since explored(safe) state 8185 * didn't use them 8186 */ 8187 for (i = 0; i < old->allocated_stack; i++) { 8188 spi = i / BPF_REG_SIZE; 8189 8190 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 8191 i += BPF_REG_SIZE - 1; 8192 /* explored state didn't use this */ 8193 continue; 8194 } 8195 8196 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 8197 continue; 8198 8199 /* explored stack has more populated slots than current stack 8200 * and these slots were used 8201 */ 8202 if (i >= cur->allocated_stack) 8203 return false; 8204 8205 /* if old state was safe with misc data in the stack 8206 * it will be safe with zero-initialized stack. 8207 * The opposite is not true 8208 */ 8209 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 8210 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 8211 continue; 8212 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 8213 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 8214 /* Ex: old explored (safe) state has STACK_SPILL in 8215 * this stack slot, but current has has STACK_MISC -> 8216 * this verifier states are not equivalent, 8217 * return false to continue verification of this path 8218 */ 8219 return false; 8220 if (i % BPF_REG_SIZE) 8221 continue; 8222 if (old->stack[spi].slot_type[0] != STACK_SPILL) 8223 continue; 8224 if (!regsafe(&old->stack[spi].spilled_ptr, 8225 &cur->stack[spi].spilled_ptr, 8226 idmap)) 8227 /* when explored and current stack slot are both storing 8228 * spilled registers, check that stored pointers types 8229 * are the same as well. 8230 * Ex: explored safe path could have stored 8231 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 8232 * but current path has stored: 8233 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 8234 * such verifier states are not equivalent. 8235 * return false to continue verification of this path 8236 */ 8237 return false; 8238 } 8239 return true; 8240 } 8241 8242 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) 8243 { 8244 if (old->acquired_refs != cur->acquired_refs) 8245 return false; 8246 return !memcmp(old->refs, cur->refs, 8247 sizeof(*old->refs) * old->acquired_refs); 8248 } 8249 8250 /* compare two verifier states 8251 * 8252 * all states stored in state_list are known to be valid, since 8253 * verifier reached 'bpf_exit' instruction through them 8254 * 8255 * this function is called when verifier exploring different branches of 8256 * execution popped from the state stack. If it sees an old state that has 8257 * more strict register state and more strict stack state then this execution 8258 * branch doesn't need to be explored further, since verifier already 8259 * concluded that more strict state leads to valid finish. 8260 * 8261 * Therefore two states are equivalent if register state is more conservative 8262 * and explored stack state is more conservative than the current one. 8263 * Example: 8264 * explored current 8265 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 8266 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 8267 * 8268 * In other words if current stack state (one being explored) has more 8269 * valid slots than old one that already passed validation, it means 8270 * the verifier can stop exploring and conclude that current state is valid too 8271 * 8272 * Similarly with registers. If explored state has register type as invalid 8273 * whereas register type in current state is meaningful, it means that 8274 * the current state will reach 'bpf_exit' instruction safely 8275 */ 8276 static bool func_states_equal(struct bpf_func_state *old, 8277 struct bpf_func_state *cur) 8278 { 8279 struct idpair *idmap; 8280 bool ret = false; 8281 int i; 8282 8283 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); 8284 /* If we failed to allocate the idmap, just say it's not safe */ 8285 if (!idmap) 8286 return false; 8287 8288 for (i = 0; i < MAX_BPF_REG; i++) { 8289 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) 8290 goto out_free; 8291 } 8292 8293 if (!stacksafe(old, cur, idmap)) 8294 goto out_free; 8295 8296 if (!refsafe(old, cur)) 8297 goto out_free; 8298 ret = true; 8299 out_free: 8300 kfree(idmap); 8301 return ret; 8302 } 8303 8304 static bool states_equal(struct bpf_verifier_env *env, 8305 struct bpf_verifier_state *old, 8306 struct bpf_verifier_state *cur) 8307 { 8308 int i; 8309 8310 if (old->curframe != cur->curframe) 8311 return false; 8312 8313 /* Verification state from speculative execution simulation 8314 * must never prune a non-speculative execution one. 8315 */ 8316 if (old->speculative && !cur->speculative) 8317 return false; 8318 8319 if (old->active_spin_lock != cur->active_spin_lock) 8320 return false; 8321 8322 /* for states to be equal callsites have to be the same 8323 * and all frame states need to be equivalent 8324 */ 8325 for (i = 0; i <= old->curframe; i++) { 8326 if (old->frame[i]->callsite != cur->frame[i]->callsite) 8327 return false; 8328 if (!func_states_equal(old->frame[i], cur->frame[i])) 8329 return false; 8330 } 8331 return true; 8332 } 8333 8334 /* Return 0 if no propagation happened. Return negative error code if error 8335 * happened. Otherwise, return the propagated bit. 8336 */ 8337 static int propagate_liveness_reg(struct bpf_verifier_env *env, 8338 struct bpf_reg_state *reg, 8339 struct bpf_reg_state *parent_reg) 8340 { 8341 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 8342 u8 flag = reg->live & REG_LIVE_READ; 8343 int err; 8344 8345 /* When comes here, read flags of PARENT_REG or REG could be any of 8346 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 8347 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 8348 */ 8349 if (parent_flag == REG_LIVE_READ64 || 8350 /* Or if there is no read flag from REG. */ 8351 !flag || 8352 /* Or if the read flag from REG is the same as PARENT_REG. */ 8353 parent_flag == flag) 8354 return 0; 8355 8356 err = mark_reg_read(env, reg, parent_reg, flag); 8357 if (err) 8358 return err; 8359 8360 return flag; 8361 } 8362 8363 /* A write screens off any subsequent reads; but write marks come from the 8364 * straight-line code between a state and its parent. When we arrive at an 8365 * equivalent state (jump target or such) we didn't arrive by the straight-line 8366 * code, so read marks in the state must propagate to the parent regardless 8367 * of the state's write marks. That's what 'parent == state->parent' comparison 8368 * in mark_reg_read() is for. 8369 */ 8370 static int propagate_liveness(struct bpf_verifier_env *env, 8371 const struct bpf_verifier_state *vstate, 8372 struct bpf_verifier_state *vparent) 8373 { 8374 struct bpf_reg_state *state_reg, *parent_reg; 8375 struct bpf_func_state *state, *parent; 8376 int i, frame, err = 0; 8377 8378 if (vparent->curframe != vstate->curframe) { 8379 WARN(1, "propagate_live: parent frame %d current frame %d\n", 8380 vparent->curframe, vstate->curframe); 8381 return -EFAULT; 8382 } 8383 /* Propagate read liveness of registers... */ 8384 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 8385 for (frame = 0; frame <= vstate->curframe; frame++) { 8386 parent = vparent->frame[frame]; 8387 state = vstate->frame[frame]; 8388 parent_reg = parent->regs; 8389 state_reg = state->regs; 8390 /* We don't need to worry about FP liveness, it's read-only */ 8391 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 8392 err = propagate_liveness_reg(env, &state_reg[i], 8393 &parent_reg[i]); 8394 if (err < 0) 8395 return err; 8396 if (err == REG_LIVE_READ64) 8397 mark_insn_zext(env, &parent_reg[i]); 8398 } 8399 8400 /* Propagate stack slots. */ 8401 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 8402 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 8403 parent_reg = &parent->stack[i].spilled_ptr; 8404 state_reg = &state->stack[i].spilled_ptr; 8405 err = propagate_liveness_reg(env, state_reg, 8406 parent_reg); 8407 if (err < 0) 8408 return err; 8409 } 8410 } 8411 return 0; 8412 } 8413 8414 /* find precise scalars in the previous equivalent state and 8415 * propagate them into the current state 8416 */ 8417 static int propagate_precision(struct bpf_verifier_env *env, 8418 const struct bpf_verifier_state *old) 8419 { 8420 struct bpf_reg_state *state_reg; 8421 struct bpf_func_state *state; 8422 int i, err = 0; 8423 8424 state = old->frame[old->curframe]; 8425 state_reg = state->regs; 8426 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 8427 if (state_reg->type != SCALAR_VALUE || 8428 !state_reg->precise) 8429 continue; 8430 if (env->log.level & BPF_LOG_LEVEL2) 8431 verbose(env, "propagating r%d\n", i); 8432 err = mark_chain_precision(env, i); 8433 if (err < 0) 8434 return err; 8435 } 8436 8437 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 8438 if (state->stack[i].slot_type[0] != STACK_SPILL) 8439 continue; 8440 state_reg = &state->stack[i].spilled_ptr; 8441 if (state_reg->type != SCALAR_VALUE || 8442 !state_reg->precise) 8443 continue; 8444 if (env->log.level & BPF_LOG_LEVEL2) 8445 verbose(env, "propagating fp%d\n", 8446 (-i - 1) * BPF_REG_SIZE); 8447 err = mark_chain_precision_stack(env, i); 8448 if (err < 0) 8449 return err; 8450 } 8451 return 0; 8452 } 8453 8454 static bool states_maybe_looping(struct bpf_verifier_state *old, 8455 struct bpf_verifier_state *cur) 8456 { 8457 struct bpf_func_state *fold, *fcur; 8458 int i, fr = cur->curframe; 8459 8460 if (old->curframe != fr) 8461 return false; 8462 8463 fold = old->frame[fr]; 8464 fcur = cur->frame[fr]; 8465 for (i = 0; i < MAX_BPF_REG; i++) 8466 if (memcmp(&fold->regs[i], &fcur->regs[i], 8467 offsetof(struct bpf_reg_state, parent))) 8468 return false; 8469 return true; 8470 } 8471 8472 8473 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 8474 { 8475 struct bpf_verifier_state_list *new_sl; 8476 struct bpf_verifier_state_list *sl, **pprev; 8477 struct bpf_verifier_state *cur = env->cur_state, *new; 8478 int i, j, err, states_cnt = 0; 8479 bool add_new_state = env->test_state_freq ? true : false; 8480 8481 cur->last_insn_idx = env->prev_insn_idx; 8482 if (!env->insn_aux_data[insn_idx].prune_point) 8483 /* this 'insn_idx' instruction wasn't marked, so we will not 8484 * be doing state search here 8485 */ 8486 return 0; 8487 8488 /* bpf progs typically have pruning point every 4 instructions 8489 * http://vger.kernel.org/bpfconf2019.html#session-1 8490 * Do not add new state for future pruning if the verifier hasn't seen 8491 * at least 2 jumps and at least 8 instructions. 8492 * This heuristics helps decrease 'total_states' and 'peak_states' metric. 8493 * In tests that amounts to up to 50% reduction into total verifier 8494 * memory consumption and 20% verifier time speedup. 8495 */ 8496 if (env->jmps_processed - env->prev_jmps_processed >= 2 && 8497 env->insn_processed - env->prev_insn_processed >= 8) 8498 add_new_state = true; 8499 8500 pprev = explored_state(env, insn_idx); 8501 sl = *pprev; 8502 8503 clean_live_states(env, insn_idx, cur); 8504 8505 while (sl) { 8506 states_cnt++; 8507 if (sl->state.insn_idx != insn_idx) 8508 goto next; 8509 if (sl->state.branches) { 8510 if (states_maybe_looping(&sl->state, cur) && 8511 states_equal(env, &sl->state, cur)) { 8512 verbose_linfo(env, insn_idx, "; "); 8513 verbose(env, "infinite loop detected at insn %d\n", insn_idx); 8514 return -EINVAL; 8515 } 8516 /* if the verifier is processing a loop, avoid adding new state 8517 * too often, since different loop iterations have distinct 8518 * states and may not help future pruning. 8519 * This threshold shouldn't be too low to make sure that 8520 * a loop with large bound will be rejected quickly. 8521 * The most abusive loop will be: 8522 * r1 += 1 8523 * if r1 < 1000000 goto pc-2 8524 * 1M insn_procssed limit / 100 == 10k peak states. 8525 * This threshold shouldn't be too high either, since states 8526 * at the end of the loop are likely to be useful in pruning. 8527 */ 8528 if (env->jmps_processed - env->prev_jmps_processed < 20 && 8529 env->insn_processed - env->prev_insn_processed < 100) 8530 add_new_state = false; 8531 goto miss; 8532 } 8533 if (states_equal(env, &sl->state, cur)) { 8534 sl->hit_cnt++; 8535 /* reached equivalent register/stack state, 8536 * prune the search. 8537 * Registers read by the continuation are read by us. 8538 * If we have any write marks in env->cur_state, they 8539 * will prevent corresponding reads in the continuation 8540 * from reaching our parent (an explored_state). Our 8541 * own state will get the read marks recorded, but 8542 * they'll be immediately forgotten as we're pruning 8543 * this state and will pop a new one. 8544 */ 8545 err = propagate_liveness(env, &sl->state, cur); 8546 8547 /* if previous state reached the exit with precision and 8548 * current state is equivalent to it (except precsion marks) 8549 * the precision needs to be propagated back in 8550 * the current state. 8551 */ 8552 err = err ? : push_jmp_history(env, cur); 8553 err = err ? : propagate_precision(env, &sl->state); 8554 if (err) 8555 return err; 8556 return 1; 8557 } 8558 miss: 8559 /* when new state is not going to be added do not increase miss count. 8560 * Otherwise several loop iterations will remove the state 8561 * recorded earlier. The goal of these heuristics is to have 8562 * states from some iterations of the loop (some in the beginning 8563 * and some at the end) to help pruning. 8564 */ 8565 if (add_new_state) 8566 sl->miss_cnt++; 8567 /* heuristic to determine whether this state is beneficial 8568 * to keep checking from state equivalence point of view. 8569 * Higher numbers increase max_states_per_insn and verification time, 8570 * but do not meaningfully decrease insn_processed. 8571 */ 8572 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { 8573 /* the state is unlikely to be useful. Remove it to 8574 * speed up verification 8575 */ 8576 *pprev = sl->next; 8577 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { 8578 u32 br = sl->state.branches; 8579 8580 WARN_ONCE(br, 8581 "BUG live_done but branches_to_explore %d\n", 8582 br); 8583 free_verifier_state(&sl->state, false); 8584 kfree(sl); 8585 env->peak_states--; 8586 } else { 8587 /* cannot free this state, since parentage chain may 8588 * walk it later. Add it for free_list instead to 8589 * be freed at the end of verification 8590 */ 8591 sl->next = env->free_list; 8592 env->free_list = sl; 8593 } 8594 sl = *pprev; 8595 continue; 8596 } 8597 next: 8598 pprev = &sl->next; 8599 sl = *pprev; 8600 } 8601 8602 if (env->max_states_per_insn < states_cnt) 8603 env->max_states_per_insn = states_cnt; 8604 8605 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 8606 return push_jmp_history(env, cur); 8607 8608 if (!add_new_state) 8609 return push_jmp_history(env, cur); 8610 8611 /* There were no equivalent states, remember the current one. 8612 * Technically the current state is not proven to be safe yet, 8613 * but it will either reach outer most bpf_exit (which means it's safe) 8614 * or it will be rejected. When there are no loops the verifier won't be 8615 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 8616 * again on the way to bpf_exit. 8617 * When looping the sl->state.branches will be > 0 and this state 8618 * will not be considered for equivalence until branches == 0. 8619 */ 8620 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 8621 if (!new_sl) 8622 return -ENOMEM; 8623 env->total_states++; 8624 env->peak_states++; 8625 env->prev_jmps_processed = env->jmps_processed; 8626 env->prev_insn_processed = env->insn_processed; 8627 8628 /* add new state to the head of linked list */ 8629 new = &new_sl->state; 8630 err = copy_verifier_state(new, cur); 8631 if (err) { 8632 free_verifier_state(new, false); 8633 kfree(new_sl); 8634 return err; 8635 } 8636 new->insn_idx = insn_idx; 8637 WARN_ONCE(new->branches != 1, 8638 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 8639 8640 cur->parent = new; 8641 cur->first_insn_idx = insn_idx; 8642 clear_jmp_history(cur); 8643 new_sl->next = *explored_state(env, insn_idx); 8644 *explored_state(env, insn_idx) = new_sl; 8645 /* connect new state to parentage chain. Current frame needs all 8646 * registers connected. Only r6 - r9 of the callers are alive (pushed 8647 * to the stack implicitly by JITs) so in callers' frames connect just 8648 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 8649 * the state of the call instruction (with WRITTEN set), and r0 comes 8650 * from callee with its full parentage chain, anyway. 8651 */ 8652 /* clear write marks in current state: the writes we did are not writes 8653 * our child did, so they don't screen off its reads from us. 8654 * (There are no read marks in current state, because reads always mark 8655 * their parent and current state never has children yet. Only 8656 * explored_states can get read marks.) 8657 */ 8658 for (j = 0; j <= cur->curframe; j++) { 8659 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 8660 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 8661 for (i = 0; i < BPF_REG_FP; i++) 8662 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 8663 } 8664 8665 /* all stack frames are accessible from callee, clear them all */ 8666 for (j = 0; j <= cur->curframe; j++) { 8667 struct bpf_func_state *frame = cur->frame[j]; 8668 struct bpf_func_state *newframe = new->frame[j]; 8669 8670 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 8671 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 8672 frame->stack[i].spilled_ptr.parent = 8673 &newframe->stack[i].spilled_ptr; 8674 } 8675 } 8676 return 0; 8677 } 8678 8679 /* Return true if it's OK to have the same insn return a different type. */ 8680 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 8681 { 8682 switch (type) { 8683 case PTR_TO_CTX: 8684 case PTR_TO_SOCKET: 8685 case PTR_TO_SOCKET_OR_NULL: 8686 case PTR_TO_SOCK_COMMON: 8687 case PTR_TO_SOCK_COMMON_OR_NULL: 8688 case PTR_TO_TCP_SOCK: 8689 case PTR_TO_TCP_SOCK_OR_NULL: 8690 case PTR_TO_XDP_SOCK: 8691 case PTR_TO_BTF_ID: 8692 case PTR_TO_BTF_ID_OR_NULL: 8693 return false; 8694 default: 8695 return true; 8696 } 8697 } 8698 8699 /* If an instruction was previously used with particular pointer types, then we 8700 * need to be careful to avoid cases such as the below, where it may be ok 8701 * for one branch accessing the pointer, but not ok for the other branch: 8702 * 8703 * R1 = sock_ptr 8704 * goto X; 8705 * ... 8706 * R1 = some_other_valid_ptr; 8707 * goto X; 8708 * ... 8709 * R2 = *(u32 *)(R1 + 0); 8710 */ 8711 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 8712 { 8713 return src != prev && (!reg_type_mismatch_ok(src) || 8714 !reg_type_mismatch_ok(prev)); 8715 } 8716 8717 static int do_check(struct bpf_verifier_env *env) 8718 { 8719 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 8720 struct bpf_verifier_state *state = env->cur_state; 8721 struct bpf_insn *insns = env->prog->insnsi; 8722 struct bpf_reg_state *regs; 8723 int insn_cnt = env->prog->len; 8724 bool do_print_state = false; 8725 int prev_insn_idx = -1; 8726 8727 for (;;) { 8728 struct bpf_insn *insn; 8729 u8 class; 8730 int err; 8731 8732 env->prev_insn_idx = prev_insn_idx; 8733 if (env->insn_idx >= insn_cnt) { 8734 verbose(env, "invalid insn idx %d insn_cnt %d\n", 8735 env->insn_idx, insn_cnt); 8736 return -EFAULT; 8737 } 8738 8739 insn = &insns[env->insn_idx]; 8740 class = BPF_CLASS(insn->code); 8741 8742 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 8743 verbose(env, 8744 "BPF program is too large. Processed %d insn\n", 8745 env->insn_processed); 8746 return -E2BIG; 8747 } 8748 8749 err = is_state_visited(env, env->insn_idx); 8750 if (err < 0) 8751 return err; 8752 if (err == 1) { 8753 /* found equivalent state, can prune the search */ 8754 if (env->log.level & BPF_LOG_LEVEL) { 8755 if (do_print_state) 8756 verbose(env, "\nfrom %d to %d%s: safe\n", 8757 env->prev_insn_idx, env->insn_idx, 8758 env->cur_state->speculative ? 8759 " (speculative execution)" : ""); 8760 else 8761 verbose(env, "%d: safe\n", env->insn_idx); 8762 } 8763 goto process_bpf_exit; 8764 } 8765 8766 if (signal_pending(current)) 8767 return -EAGAIN; 8768 8769 if (need_resched()) 8770 cond_resched(); 8771 8772 if (env->log.level & BPF_LOG_LEVEL2 || 8773 (env->log.level & BPF_LOG_LEVEL && do_print_state)) { 8774 if (env->log.level & BPF_LOG_LEVEL2) 8775 verbose(env, "%d:", env->insn_idx); 8776 else 8777 verbose(env, "\nfrom %d to %d%s:", 8778 env->prev_insn_idx, env->insn_idx, 8779 env->cur_state->speculative ? 8780 " (speculative execution)" : ""); 8781 print_verifier_state(env, state->frame[state->curframe]); 8782 do_print_state = false; 8783 } 8784 8785 if (env->log.level & BPF_LOG_LEVEL) { 8786 const struct bpf_insn_cbs cbs = { 8787 .cb_print = verbose, 8788 .private_data = env, 8789 }; 8790 8791 verbose_linfo(env, env->insn_idx, "; "); 8792 verbose(env, "%d: ", env->insn_idx); 8793 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 8794 } 8795 8796 if (bpf_prog_is_dev_bound(env->prog->aux)) { 8797 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 8798 env->prev_insn_idx); 8799 if (err) 8800 return err; 8801 } 8802 8803 regs = cur_regs(env); 8804 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 8805 prev_insn_idx = env->insn_idx; 8806 8807 if (class == BPF_ALU || class == BPF_ALU64) { 8808 err = check_alu_op(env, insn); 8809 if (err) 8810 return err; 8811 8812 } else if (class == BPF_LDX) { 8813 enum bpf_reg_type *prev_src_type, src_reg_type; 8814 8815 /* check for reserved fields is already done */ 8816 8817 /* check src operand */ 8818 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8819 if (err) 8820 return err; 8821 8822 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 8823 if (err) 8824 return err; 8825 8826 src_reg_type = regs[insn->src_reg].type; 8827 8828 /* check that memory (src_reg + off) is readable, 8829 * the state of dst_reg will be updated by this func 8830 */ 8831 err = check_mem_access(env, env->insn_idx, insn->src_reg, 8832 insn->off, BPF_SIZE(insn->code), 8833 BPF_READ, insn->dst_reg, false); 8834 if (err) 8835 return err; 8836 8837 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; 8838 8839 if (*prev_src_type == NOT_INIT) { 8840 /* saw a valid insn 8841 * dst_reg = *(u32 *)(src_reg + off) 8842 * save type to validate intersecting paths 8843 */ 8844 *prev_src_type = src_reg_type; 8845 8846 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { 8847 /* ABuser program is trying to use the same insn 8848 * dst_reg = *(u32*) (src_reg + off) 8849 * with different pointer types: 8850 * src_reg == ctx in one branch and 8851 * src_reg == stack|map in some other branch. 8852 * Reject it. 8853 */ 8854 verbose(env, "same insn cannot be used with different pointers\n"); 8855 return -EINVAL; 8856 } 8857 8858 } else if (class == BPF_STX) { 8859 enum bpf_reg_type *prev_dst_type, dst_reg_type; 8860 8861 if (BPF_MODE(insn->code) == BPF_XADD) { 8862 err = check_xadd(env, env->insn_idx, insn); 8863 if (err) 8864 return err; 8865 env->insn_idx++; 8866 continue; 8867 } 8868 8869 /* check src1 operand */ 8870 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8871 if (err) 8872 return err; 8873 /* check src2 operand */ 8874 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8875 if (err) 8876 return err; 8877 8878 dst_reg_type = regs[insn->dst_reg].type; 8879 8880 /* check that memory (dst_reg + off) is writeable */ 8881 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 8882 insn->off, BPF_SIZE(insn->code), 8883 BPF_WRITE, insn->src_reg, false); 8884 if (err) 8885 return err; 8886 8887 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 8888 8889 if (*prev_dst_type == NOT_INIT) { 8890 *prev_dst_type = dst_reg_type; 8891 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 8892 verbose(env, "same insn cannot be used with different pointers\n"); 8893 return -EINVAL; 8894 } 8895 8896 } else if (class == BPF_ST) { 8897 if (BPF_MODE(insn->code) != BPF_MEM || 8898 insn->src_reg != BPF_REG_0) { 8899 verbose(env, "BPF_ST uses reserved fields\n"); 8900 return -EINVAL; 8901 } 8902 /* check src operand */ 8903 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8904 if (err) 8905 return err; 8906 8907 if (is_ctx_reg(env, insn->dst_reg)) { 8908 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 8909 insn->dst_reg, 8910 reg_type_str[reg_state(env, insn->dst_reg)->type]); 8911 return -EACCES; 8912 } 8913 8914 /* check that memory (dst_reg + off) is writeable */ 8915 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 8916 insn->off, BPF_SIZE(insn->code), 8917 BPF_WRITE, -1, false); 8918 if (err) 8919 return err; 8920 8921 } else if (class == BPF_JMP || class == BPF_JMP32) { 8922 u8 opcode = BPF_OP(insn->code); 8923 8924 env->jmps_processed++; 8925 if (opcode == BPF_CALL) { 8926 if (BPF_SRC(insn->code) != BPF_K || 8927 insn->off != 0 || 8928 (insn->src_reg != BPF_REG_0 && 8929 insn->src_reg != BPF_PSEUDO_CALL) || 8930 insn->dst_reg != BPF_REG_0 || 8931 class == BPF_JMP32) { 8932 verbose(env, "BPF_CALL uses reserved fields\n"); 8933 return -EINVAL; 8934 } 8935 8936 if (env->cur_state->active_spin_lock && 8937 (insn->src_reg == BPF_PSEUDO_CALL || 8938 insn->imm != BPF_FUNC_spin_unlock)) { 8939 verbose(env, "function calls are not allowed while holding a lock\n"); 8940 return -EINVAL; 8941 } 8942 if (insn->src_reg == BPF_PSEUDO_CALL) 8943 err = check_func_call(env, insn, &env->insn_idx); 8944 else 8945 err = check_helper_call(env, insn->imm, env->insn_idx); 8946 if (err) 8947 return err; 8948 8949 } else if (opcode == BPF_JA) { 8950 if (BPF_SRC(insn->code) != BPF_K || 8951 insn->imm != 0 || 8952 insn->src_reg != BPF_REG_0 || 8953 insn->dst_reg != BPF_REG_0 || 8954 class == BPF_JMP32) { 8955 verbose(env, "BPF_JA uses reserved fields\n"); 8956 return -EINVAL; 8957 } 8958 8959 env->insn_idx += insn->off + 1; 8960 continue; 8961 8962 } else if (opcode == BPF_EXIT) { 8963 if (BPF_SRC(insn->code) != BPF_K || 8964 insn->imm != 0 || 8965 insn->src_reg != BPF_REG_0 || 8966 insn->dst_reg != BPF_REG_0 || 8967 class == BPF_JMP32) { 8968 verbose(env, "BPF_EXIT uses reserved fields\n"); 8969 return -EINVAL; 8970 } 8971 8972 if (env->cur_state->active_spin_lock) { 8973 verbose(env, "bpf_spin_unlock is missing\n"); 8974 return -EINVAL; 8975 } 8976 8977 if (state->curframe) { 8978 /* exit from nested function */ 8979 err = prepare_func_exit(env, &env->insn_idx); 8980 if (err) 8981 return err; 8982 do_print_state = true; 8983 continue; 8984 } 8985 8986 err = check_reference_leak(env); 8987 if (err) 8988 return err; 8989 8990 err = check_return_code(env); 8991 if (err) 8992 return err; 8993 process_bpf_exit: 8994 update_branch_counts(env, env->cur_state); 8995 err = pop_stack(env, &prev_insn_idx, 8996 &env->insn_idx, pop_log); 8997 if (err < 0) { 8998 if (err != -ENOENT) 8999 return err; 9000 break; 9001 } else { 9002 do_print_state = true; 9003 continue; 9004 } 9005 } else { 9006 err = check_cond_jmp_op(env, insn, &env->insn_idx); 9007 if (err) 9008 return err; 9009 } 9010 } else if (class == BPF_LD) { 9011 u8 mode = BPF_MODE(insn->code); 9012 9013 if (mode == BPF_ABS || mode == BPF_IND) { 9014 err = check_ld_abs(env, insn); 9015 if (err) 9016 return err; 9017 9018 } else if (mode == BPF_IMM) { 9019 err = check_ld_imm(env, insn); 9020 if (err) 9021 return err; 9022 9023 env->insn_idx++; 9024 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 9025 } else { 9026 verbose(env, "invalid BPF_LD mode\n"); 9027 return -EINVAL; 9028 } 9029 } else { 9030 verbose(env, "unknown insn class %d\n", class); 9031 return -EINVAL; 9032 } 9033 9034 env->insn_idx++; 9035 } 9036 9037 return 0; 9038 } 9039 9040 static int check_map_prealloc(struct bpf_map *map) 9041 { 9042 return (map->map_type != BPF_MAP_TYPE_HASH && 9043 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 9044 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 9045 !(map->map_flags & BPF_F_NO_PREALLOC); 9046 } 9047 9048 static bool is_tracing_prog_type(enum bpf_prog_type type) 9049 { 9050 switch (type) { 9051 case BPF_PROG_TYPE_KPROBE: 9052 case BPF_PROG_TYPE_TRACEPOINT: 9053 case BPF_PROG_TYPE_PERF_EVENT: 9054 case BPF_PROG_TYPE_RAW_TRACEPOINT: 9055 return true; 9056 default: 9057 return false; 9058 } 9059 } 9060 9061 static bool is_preallocated_map(struct bpf_map *map) 9062 { 9063 if (!check_map_prealloc(map)) 9064 return false; 9065 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) 9066 return false; 9067 return true; 9068 } 9069 9070 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 9071 struct bpf_map *map, 9072 struct bpf_prog *prog) 9073 9074 { 9075 /* 9076 * Validate that trace type programs use preallocated hash maps. 9077 * 9078 * For programs attached to PERF events this is mandatory as the 9079 * perf NMI can hit any arbitrary code sequence. 9080 * 9081 * All other trace types using preallocated hash maps are unsafe as 9082 * well because tracepoint or kprobes can be inside locked regions 9083 * of the memory allocator or at a place where a recursion into the 9084 * memory allocator would see inconsistent state. 9085 * 9086 * On RT enabled kernels run-time allocation of all trace type 9087 * programs is strictly prohibited due to lock type constraints. On 9088 * !RT kernels it is allowed for backwards compatibility reasons for 9089 * now, but warnings are emitted so developers are made aware of 9090 * the unsafety and can fix their programs before this is enforced. 9091 */ 9092 if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) { 9093 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 9094 verbose(env, "perf_event programs can only use preallocated hash map\n"); 9095 return -EINVAL; 9096 } 9097 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 9098 verbose(env, "trace type programs can only use preallocated hash map\n"); 9099 return -EINVAL; 9100 } 9101 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); 9102 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); 9103 } 9104 9105 if ((is_tracing_prog_type(prog->type) || 9106 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && 9107 map_value_has_spin_lock(map)) { 9108 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 9109 return -EINVAL; 9110 } 9111 9112 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 9113 !bpf_offload_prog_map_match(prog, map)) { 9114 verbose(env, "offload device mismatch between prog and map\n"); 9115 return -EINVAL; 9116 } 9117 9118 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 9119 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 9120 return -EINVAL; 9121 } 9122 9123 return 0; 9124 } 9125 9126 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 9127 { 9128 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 9129 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 9130 } 9131 9132 /* look for pseudo eBPF instructions that access map FDs and 9133 * replace them with actual map pointers 9134 */ 9135 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 9136 { 9137 struct bpf_insn *insn = env->prog->insnsi; 9138 int insn_cnt = env->prog->len; 9139 int i, j, err; 9140 9141 err = bpf_prog_calc_tag(env->prog); 9142 if (err) 9143 return err; 9144 9145 for (i = 0; i < insn_cnt; i++, insn++) { 9146 if (BPF_CLASS(insn->code) == BPF_LDX && 9147 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 9148 verbose(env, "BPF_LDX uses reserved fields\n"); 9149 return -EINVAL; 9150 } 9151 9152 if (BPF_CLASS(insn->code) == BPF_STX && 9153 ((BPF_MODE(insn->code) != BPF_MEM && 9154 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 9155 verbose(env, "BPF_STX uses reserved fields\n"); 9156 return -EINVAL; 9157 } 9158 9159 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 9160 struct bpf_insn_aux_data *aux; 9161 struct bpf_map *map; 9162 struct fd f; 9163 u64 addr; 9164 9165 if (i == insn_cnt - 1 || insn[1].code != 0 || 9166 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 9167 insn[1].off != 0) { 9168 verbose(env, "invalid bpf_ld_imm64 insn\n"); 9169 return -EINVAL; 9170 } 9171 9172 if (insn[0].src_reg == 0) 9173 /* valid generic load 64-bit imm */ 9174 goto next_insn; 9175 9176 /* In final convert_pseudo_ld_imm64() step, this is 9177 * converted into regular 64-bit imm load insn. 9178 */ 9179 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && 9180 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || 9181 (insn[0].src_reg == BPF_PSEUDO_MAP_FD && 9182 insn[1].imm != 0)) { 9183 verbose(env, 9184 "unrecognized bpf_ld_imm64 insn\n"); 9185 return -EINVAL; 9186 } 9187 9188 f = fdget(insn[0].imm); 9189 map = __bpf_map_get(f); 9190 if (IS_ERR(map)) { 9191 verbose(env, "fd %d is not pointing to valid bpf_map\n", 9192 insn[0].imm); 9193 return PTR_ERR(map); 9194 } 9195 9196 err = check_map_prog_compatibility(env, map, env->prog); 9197 if (err) { 9198 fdput(f); 9199 return err; 9200 } 9201 9202 aux = &env->insn_aux_data[i]; 9203 if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 9204 addr = (unsigned long)map; 9205 } else { 9206 u32 off = insn[1].imm; 9207 9208 if (off >= BPF_MAX_VAR_OFF) { 9209 verbose(env, "direct value offset of %u is not allowed\n", off); 9210 fdput(f); 9211 return -EINVAL; 9212 } 9213 9214 if (!map->ops->map_direct_value_addr) { 9215 verbose(env, "no direct value access support for this map type\n"); 9216 fdput(f); 9217 return -EINVAL; 9218 } 9219 9220 err = map->ops->map_direct_value_addr(map, &addr, off); 9221 if (err) { 9222 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 9223 map->value_size, off); 9224 fdput(f); 9225 return err; 9226 } 9227 9228 aux->map_off = off; 9229 addr += off; 9230 } 9231 9232 insn[0].imm = (u32)addr; 9233 insn[1].imm = addr >> 32; 9234 9235 /* check whether we recorded this map already */ 9236 for (j = 0; j < env->used_map_cnt; j++) { 9237 if (env->used_maps[j] == map) { 9238 aux->map_index = j; 9239 fdput(f); 9240 goto next_insn; 9241 } 9242 } 9243 9244 if (env->used_map_cnt >= MAX_USED_MAPS) { 9245 fdput(f); 9246 return -E2BIG; 9247 } 9248 9249 /* hold the map. If the program is rejected by verifier, 9250 * the map will be released by release_maps() or it 9251 * will be used by the valid program until it's unloaded 9252 * and all maps are released in free_used_maps() 9253 */ 9254 bpf_map_inc(map); 9255 9256 aux->map_index = env->used_map_cnt; 9257 env->used_maps[env->used_map_cnt++] = map; 9258 9259 if (bpf_map_is_cgroup_storage(map) && 9260 bpf_cgroup_storage_assign(env->prog->aux, map)) { 9261 verbose(env, "only one cgroup storage of each type is allowed\n"); 9262 fdput(f); 9263 return -EBUSY; 9264 } 9265 9266 fdput(f); 9267 next_insn: 9268 insn++; 9269 i++; 9270 continue; 9271 } 9272 9273 /* Basic sanity check before we invest more work here. */ 9274 if (!bpf_opcode_in_insntable(insn->code)) { 9275 verbose(env, "unknown opcode %02x\n", insn->code); 9276 return -EINVAL; 9277 } 9278 } 9279 9280 /* now all pseudo BPF_LD_IMM64 instructions load valid 9281 * 'struct bpf_map *' into a register instead of user map_fd. 9282 * These pointers will be used later by verifier to validate map access. 9283 */ 9284 return 0; 9285 } 9286 9287 /* drop refcnt of maps used by the rejected program */ 9288 static void release_maps(struct bpf_verifier_env *env) 9289 { 9290 __bpf_free_used_maps(env->prog->aux, env->used_maps, 9291 env->used_map_cnt); 9292 } 9293 9294 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 9295 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 9296 { 9297 struct bpf_insn *insn = env->prog->insnsi; 9298 int insn_cnt = env->prog->len; 9299 int i; 9300 9301 for (i = 0; i < insn_cnt; i++, insn++) 9302 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 9303 insn->src_reg = 0; 9304 } 9305 9306 /* single env->prog->insni[off] instruction was replaced with the range 9307 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 9308 * [0, off) and [off, end) to new locations, so the patched range stays zero 9309 */ 9310 static int adjust_insn_aux_data(struct bpf_verifier_env *env, 9311 struct bpf_prog *new_prog, u32 off, u32 cnt) 9312 { 9313 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 9314 struct bpf_insn *insn = new_prog->insnsi; 9315 u32 prog_len; 9316 int i; 9317 9318 /* aux info at OFF always needs adjustment, no matter fast path 9319 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 9320 * original insn at old prog. 9321 */ 9322 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 9323 9324 if (cnt == 1) 9325 return 0; 9326 prog_len = new_prog->len; 9327 new_data = vzalloc(array_size(prog_len, 9328 sizeof(struct bpf_insn_aux_data))); 9329 if (!new_data) 9330 return -ENOMEM; 9331 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 9332 memcpy(new_data + off + cnt - 1, old_data + off, 9333 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 9334 for (i = off; i < off + cnt - 1; i++) { 9335 new_data[i].seen = env->pass_cnt; 9336 new_data[i].zext_dst = insn_has_def32(env, insn + i); 9337 } 9338 env->insn_aux_data = new_data; 9339 vfree(old_data); 9340 return 0; 9341 } 9342 9343 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 9344 { 9345 int i; 9346 9347 if (len == 1) 9348 return; 9349 /* NOTE: fake 'exit' subprog should be updated as well. */ 9350 for (i = 0; i <= env->subprog_cnt; i++) { 9351 if (env->subprog_info[i].start <= off) 9352 continue; 9353 env->subprog_info[i].start += len - 1; 9354 } 9355 } 9356 9357 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 9358 const struct bpf_insn *patch, u32 len) 9359 { 9360 struct bpf_prog *new_prog; 9361 9362 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 9363 if (IS_ERR(new_prog)) { 9364 if (PTR_ERR(new_prog) == -ERANGE) 9365 verbose(env, 9366 "insn %d cannot be patched due to 16-bit range\n", 9367 env->insn_aux_data[off].orig_idx); 9368 return NULL; 9369 } 9370 if (adjust_insn_aux_data(env, new_prog, off, len)) 9371 return NULL; 9372 adjust_subprog_starts(env, off, len); 9373 return new_prog; 9374 } 9375 9376 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 9377 u32 off, u32 cnt) 9378 { 9379 int i, j; 9380 9381 /* find first prog starting at or after off (first to remove) */ 9382 for (i = 0; i < env->subprog_cnt; i++) 9383 if (env->subprog_info[i].start >= off) 9384 break; 9385 /* find first prog starting at or after off + cnt (first to stay) */ 9386 for (j = i; j < env->subprog_cnt; j++) 9387 if (env->subprog_info[j].start >= off + cnt) 9388 break; 9389 /* if j doesn't start exactly at off + cnt, we are just removing 9390 * the front of previous prog 9391 */ 9392 if (env->subprog_info[j].start != off + cnt) 9393 j--; 9394 9395 if (j > i) { 9396 struct bpf_prog_aux *aux = env->prog->aux; 9397 int move; 9398 9399 /* move fake 'exit' subprog as well */ 9400 move = env->subprog_cnt + 1 - j; 9401 9402 memmove(env->subprog_info + i, 9403 env->subprog_info + j, 9404 sizeof(*env->subprog_info) * move); 9405 env->subprog_cnt -= j - i; 9406 9407 /* remove func_info */ 9408 if (aux->func_info) { 9409 move = aux->func_info_cnt - j; 9410 9411 memmove(aux->func_info + i, 9412 aux->func_info + j, 9413 sizeof(*aux->func_info) * move); 9414 aux->func_info_cnt -= j - i; 9415 /* func_info->insn_off is set after all code rewrites, 9416 * in adjust_btf_func() - no need to adjust 9417 */ 9418 } 9419 } else { 9420 /* convert i from "first prog to remove" to "first to adjust" */ 9421 if (env->subprog_info[i].start == off) 9422 i++; 9423 } 9424 9425 /* update fake 'exit' subprog as well */ 9426 for (; i <= env->subprog_cnt; i++) 9427 env->subprog_info[i].start -= cnt; 9428 9429 return 0; 9430 } 9431 9432 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 9433 u32 cnt) 9434 { 9435 struct bpf_prog *prog = env->prog; 9436 u32 i, l_off, l_cnt, nr_linfo; 9437 struct bpf_line_info *linfo; 9438 9439 nr_linfo = prog->aux->nr_linfo; 9440 if (!nr_linfo) 9441 return 0; 9442 9443 linfo = prog->aux->linfo; 9444 9445 /* find first line info to remove, count lines to be removed */ 9446 for (i = 0; i < nr_linfo; i++) 9447 if (linfo[i].insn_off >= off) 9448 break; 9449 9450 l_off = i; 9451 l_cnt = 0; 9452 for (; i < nr_linfo; i++) 9453 if (linfo[i].insn_off < off + cnt) 9454 l_cnt++; 9455 else 9456 break; 9457 9458 /* First live insn doesn't match first live linfo, it needs to "inherit" 9459 * last removed linfo. prog is already modified, so prog->len == off 9460 * means no live instructions after (tail of the program was removed). 9461 */ 9462 if (prog->len != off && l_cnt && 9463 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 9464 l_cnt--; 9465 linfo[--i].insn_off = off + cnt; 9466 } 9467 9468 /* remove the line info which refer to the removed instructions */ 9469 if (l_cnt) { 9470 memmove(linfo + l_off, linfo + i, 9471 sizeof(*linfo) * (nr_linfo - i)); 9472 9473 prog->aux->nr_linfo -= l_cnt; 9474 nr_linfo = prog->aux->nr_linfo; 9475 } 9476 9477 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 9478 for (i = l_off; i < nr_linfo; i++) 9479 linfo[i].insn_off -= cnt; 9480 9481 /* fix up all subprogs (incl. 'exit') which start >= off */ 9482 for (i = 0; i <= env->subprog_cnt; i++) 9483 if (env->subprog_info[i].linfo_idx > l_off) { 9484 /* program may have started in the removed region but 9485 * may not be fully removed 9486 */ 9487 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 9488 env->subprog_info[i].linfo_idx -= l_cnt; 9489 else 9490 env->subprog_info[i].linfo_idx = l_off; 9491 } 9492 9493 return 0; 9494 } 9495 9496 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 9497 { 9498 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9499 unsigned int orig_prog_len = env->prog->len; 9500 int err; 9501 9502 if (bpf_prog_is_dev_bound(env->prog->aux)) 9503 bpf_prog_offload_remove_insns(env, off, cnt); 9504 9505 err = bpf_remove_insns(env->prog, off, cnt); 9506 if (err) 9507 return err; 9508 9509 err = adjust_subprog_starts_after_remove(env, off, cnt); 9510 if (err) 9511 return err; 9512 9513 err = bpf_adj_linfo_after_remove(env, off, cnt); 9514 if (err) 9515 return err; 9516 9517 memmove(aux_data + off, aux_data + off + cnt, 9518 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 9519 9520 return 0; 9521 } 9522 9523 /* The verifier does more data flow analysis than llvm and will not 9524 * explore branches that are dead at run time. Malicious programs can 9525 * have dead code too. Therefore replace all dead at-run-time code 9526 * with 'ja -1'. 9527 * 9528 * Just nops are not optimal, e.g. if they would sit at the end of the 9529 * program and through another bug we would manage to jump there, then 9530 * we'd execute beyond program memory otherwise. Returning exception 9531 * code also wouldn't work since we can have subprogs where the dead 9532 * code could be located. 9533 */ 9534 static void sanitize_dead_code(struct bpf_verifier_env *env) 9535 { 9536 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9537 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 9538 struct bpf_insn *insn = env->prog->insnsi; 9539 const int insn_cnt = env->prog->len; 9540 int i; 9541 9542 for (i = 0; i < insn_cnt; i++) { 9543 if (aux_data[i].seen) 9544 continue; 9545 memcpy(insn + i, &trap, sizeof(trap)); 9546 } 9547 } 9548 9549 static bool insn_is_cond_jump(u8 code) 9550 { 9551 u8 op; 9552 9553 if (BPF_CLASS(code) == BPF_JMP32) 9554 return true; 9555 9556 if (BPF_CLASS(code) != BPF_JMP) 9557 return false; 9558 9559 op = BPF_OP(code); 9560 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 9561 } 9562 9563 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 9564 { 9565 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9566 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 9567 struct bpf_insn *insn = env->prog->insnsi; 9568 const int insn_cnt = env->prog->len; 9569 int i; 9570 9571 for (i = 0; i < insn_cnt; i++, insn++) { 9572 if (!insn_is_cond_jump(insn->code)) 9573 continue; 9574 9575 if (!aux_data[i + 1].seen) 9576 ja.off = insn->off; 9577 else if (!aux_data[i + 1 + insn->off].seen) 9578 ja.off = 0; 9579 else 9580 continue; 9581 9582 if (bpf_prog_is_dev_bound(env->prog->aux)) 9583 bpf_prog_offload_replace_insn(env, i, &ja); 9584 9585 memcpy(insn, &ja, sizeof(ja)); 9586 } 9587 } 9588 9589 static int opt_remove_dead_code(struct bpf_verifier_env *env) 9590 { 9591 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 9592 int insn_cnt = env->prog->len; 9593 int i, err; 9594 9595 for (i = 0; i < insn_cnt; i++) { 9596 int j; 9597 9598 j = 0; 9599 while (i + j < insn_cnt && !aux_data[i + j].seen) 9600 j++; 9601 if (!j) 9602 continue; 9603 9604 err = verifier_remove_insns(env, i, j); 9605 if (err) 9606 return err; 9607 insn_cnt = env->prog->len; 9608 } 9609 9610 return 0; 9611 } 9612 9613 static int opt_remove_nops(struct bpf_verifier_env *env) 9614 { 9615 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 9616 struct bpf_insn *insn = env->prog->insnsi; 9617 int insn_cnt = env->prog->len; 9618 int i, err; 9619 9620 for (i = 0; i < insn_cnt; i++) { 9621 if (memcmp(&insn[i], &ja, sizeof(ja))) 9622 continue; 9623 9624 err = verifier_remove_insns(env, i, 1); 9625 if (err) 9626 return err; 9627 insn_cnt--; 9628 i--; 9629 } 9630 9631 return 0; 9632 } 9633 9634 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 9635 const union bpf_attr *attr) 9636 { 9637 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 9638 struct bpf_insn_aux_data *aux = env->insn_aux_data; 9639 int i, patch_len, delta = 0, len = env->prog->len; 9640 struct bpf_insn *insns = env->prog->insnsi; 9641 struct bpf_prog *new_prog; 9642 bool rnd_hi32; 9643 9644 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 9645 zext_patch[1] = BPF_ZEXT_REG(0); 9646 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 9647 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 9648 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 9649 for (i = 0; i < len; i++) { 9650 int adj_idx = i + delta; 9651 struct bpf_insn insn; 9652 9653 insn = insns[adj_idx]; 9654 if (!aux[adj_idx].zext_dst) { 9655 u8 code, class; 9656 u32 imm_rnd; 9657 9658 if (!rnd_hi32) 9659 continue; 9660 9661 code = insn.code; 9662 class = BPF_CLASS(code); 9663 if (insn_no_def(&insn)) 9664 continue; 9665 9666 /* NOTE: arg "reg" (the fourth one) is only used for 9667 * BPF_STX which has been ruled out in above 9668 * check, it is safe to pass NULL here. 9669 */ 9670 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { 9671 if (class == BPF_LD && 9672 BPF_MODE(code) == BPF_IMM) 9673 i++; 9674 continue; 9675 } 9676 9677 /* ctx load could be transformed into wider load. */ 9678 if (class == BPF_LDX && 9679 aux[adj_idx].ptr_type == PTR_TO_CTX) 9680 continue; 9681 9682 imm_rnd = get_random_int(); 9683 rnd_hi32_patch[0] = insn; 9684 rnd_hi32_patch[1].imm = imm_rnd; 9685 rnd_hi32_patch[3].dst_reg = insn.dst_reg; 9686 patch = rnd_hi32_patch; 9687 patch_len = 4; 9688 goto apply_patch_buffer; 9689 } 9690 9691 if (!bpf_jit_needs_zext()) 9692 continue; 9693 9694 zext_patch[0] = insn; 9695 zext_patch[1].dst_reg = insn.dst_reg; 9696 zext_patch[1].src_reg = insn.dst_reg; 9697 patch = zext_patch; 9698 patch_len = 2; 9699 apply_patch_buffer: 9700 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 9701 if (!new_prog) 9702 return -ENOMEM; 9703 env->prog = new_prog; 9704 insns = new_prog->insnsi; 9705 aux = env->insn_aux_data; 9706 delta += patch_len - 1; 9707 } 9708 9709 return 0; 9710 } 9711 9712 /* convert load instructions that access fields of a context type into a 9713 * sequence of instructions that access fields of the underlying structure: 9714 * struct __sk_buff -> struct sk_buff 9715 * struct bpf_sock_ops -> struct sock 9716 */ 9717 static int convert_ctx_accesses(struct bpf_verifier_env *env) 9718 { 9719 const struct bpf_verifier_ops *ops = env->ops; 9720 int i, cnt, size, ctx_field_size, delta = 0; 9721 const int insn_cnt = env->prog->len; 9722 struct bpf_insn insn_buf[16], *insn; 9723 u32 target_size, size_default, off; 9724 struct bpf_prog *new_prog; 9725 enum bpf_access_type type; 9726 bool is_narrower_load; 9727 9728 if (ops->gen_prologue || env->seen_direct_write) { 9729 if (!ops->gen_prologue) { 9730 verbose(env, "bpf verifier is misconfigured\n"); 9731 return -EINVAL; 9732 } 9733 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 9734 env->prog); 9735 if (cnt >= ARRAY_SIZE(insn_buf)) { 9736 verbose(env, "bpf verifier is misconfigured\n"); 9737 return -EINVAL; 9738 } else if (cnt) { 9739 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 9740 if (!new_prog) 9741 return -ENOMEM; 9742 9743 env->prog = new_prog; 9744 delta += cnt - 1; 9745 } 9746 } 9747 9748 if (bpf_prog_is_dev_bound(env->prog->aux)) 9749 return 0; 9750 9751 insn = env->prog->insnsi + delta; 9752 9753 for (i = 0; i < insn_cnt; i++, insn++) { 9754 bpf_convert_ctx_access_t convert_ctx_access; 9755 9756 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 9757 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 9758 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 9759 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 9760 type = BPF_READ; 9761 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 9762 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 9763 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 9764 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 9765 type = BPF_WRITE; 9766 else 9767 continue; 9768 9769 if (type == BPF_WRITE && 9770 env->insn_aux_data[i + delta].sanitize_stack_off) { 9771 struct bpf_insn patch[] = { 9772 /* Sanitize suspicious stack slot with zero. 9773 * There are no memory dependencies for this store, 9774 * since it's only using frame pointer and immediate 9775 * constant of zero 9776 */ 9777 BPF_ST_MEM(BPF_DW, BPF_REG_FP, 9778 env->insn_aux_data[i + delta].sanitize_stack_off, 9779 0), 9780 /* the original STX instruction will immediately 9781 * overwrite the same stack slot with appropriate value 9782 */ 9783 *insn, 9784 }; 9785 9786 cnt = ARRAY_SIZE(patch); 9787 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 9788 if (!new_prog) 9789 return -ENOMEM; 9790 9791 delta += cnt - 1; 9792 env->prog = new_prog; 9793 insn = new_prog->insnsi + i + delta; 9794 continue; 9795 } 9796 9797 switch (env->insn_aux_data[i + delta].ptr_type) { 9798 case PTR_TO_CTX: 9799 if (!ops->convert_ctx_access) 9800 continue; 9801 convert_ctx_access = ops->convert_ctx_access; 9802 break; 9803 case PTR_TO_SOCKET: 9804 case PTR_TO_SOCK_COMMON: 9805 convert_ctx_access = bpf_sock_convert_ctx_access; 9806 break; 9807 case PTR_TO_TCP_SOCK: 9808 convert_ctx_access = bpf_tcp_sock_convert_ctx_access; 9809 break; 9810 case PTR_TO_XDP_SOCK: 9811 convert_ctx_access = bpf_xdp_sock_convert_ctx_access; 9812 break; 9813 case PTR_TO_BTF_ID: 9814 if (type == BPF_READ) { 9815 insn->code = BPF_LDX | BPF_PROBE_MEM | 9816 BPF_SIZE((insn)->code); 9817 env->prog->aux->num_exentries++; 9818 } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) { 9819 verbose(env, "Writes through BTF pointers are not allowed\n"); 9820 return -EINVAL; 9821 } 9822 continue; 9823 default: 9824 continue; 9825 } 9826 9827 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 9828 size = BPF_LDST_BYTES(insn); 9829 9830 /* If the read access is a narrower load of the field, 9831 * convert to a 4/8-byte load, to minimum program type specific 9832 * convert_ctx_access changes. If conversion is successful, 9833 * we will apply proper mask to the result. 9834 */ 9835 is_narrower_load = size < ctx_field_size; 9836 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 9837 off = insn->off; 9838 if (is_narrower_load) { 9839 u8 size_code; 9840 9841 if (type == BPF_WRITE) { 9842 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 9843 return -EINVAL; 9844 } 9845 9846 size_code = BPF_H; 9847 if (ctx_field_size == 4) 9848 size_code = BPF_W; 9849 else if (ctx_field_size == 8) 9850 size_code = BPF_DW; 9851 9852 insn->off = off & ~(size_default - 1); 9853 insn->code = BPF_LDX | BPF_MEM | size_code; 9854 } 9855 9856 target_size = 0; 9857 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 9858 &target_size); 9859 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 9860 (ctx_field_size && !target_size)) { 9861 verbose(env, "bpf verifier is misconfigured\n"); 9862 return -EINVAL; 9863 } 9864 9865 if (is_narrower_load && size < target_size) { 9866 u8 shift = bpf_ctx_narrow_access_offset( 9867 off, size, size_default) * 8; 9868 if (ctx_field_size <= 4) { 9869 if (shift) 9870 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 9871 insn->dst_reg, 9872 shift); 9873 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 9874 (1 << size * 8) - 1); 9875 } else { 9876 if (shift) 9877 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 9878 insn->dst_reg, 9879 shift); 9880 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 9881 (1ULL << size * 8) - 1); 9882 } 9883 } 9884 9885 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 9886 if (!new_prog) 9887 return -ENOMEM; 9888 9889 delta += cnt - 1; 9890 9891 /* keep walking new program and skip insns we just inserted */ 9892 env->prog = new_prog; 9893 insn = new_prog->insnsi + i + delta; 9894 } 9895 9896 return 0; 9897 } 9898 9899 static int jit_subprogs(struct bpf_verifier_env *env) 9900 { 9901 struct bpf_prog *prog = env->prog, **func, *tmp; 9902 int i, j, subprog_start, subprog_end = 0, len, subprog; 9903 struct bpf_insn *insn; 9904 void *old_bpf_func; 9905 int err, num_exentries; 9906 9907 if (env->subprog_cnt <= 1) 9908 return 0; 9909 9910 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 9911 if (insn->code != (BPF_JMP | BPF_CALL) || 9912 insn->src_reg != BPF_PSEUDO_CALL) 9913 continue; 9914 /* Upon error here we cannot fall back to interpreter but 9915 * need a hard reject of the program. Thus -EFAULT is 9916 * propagated in any case. 9917 */ 9918 subprog = find_subprog(env, i + insn->imm + 1); 9919 if (subprog < 0) { 9920 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 9921 i + insn->imm + 1); 9922 return -EFAULT; 9923 } 9924 /* temporarily remember subprog id inside insn instead of 9925 * aux_data, since next loop will split up all insns into funcs 9926 */ 9927 insn->off = subprog; 9928 /* remember original imm in case JIT fails and fallback 9929 * to interpreter will be needed 9930 */ 9931 env->insn_aux_data[i].call_imm = insn->imm; 9932 /* point imm to __bpf_call_base+1 from JITs point of view */ 9933 insn->imm = 1; 9934 } 9935 9936 err = bpf_prog_alloc_jited_linfo(prog); 9937 if (err) 9938 goto out_undo_insn; 9939 9940 err = -ENOMEM; 9941 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 9942 if (!func) 9943 goto out_undo_insn; 9944 9945 for (i = 0; i < env->subprog_cnt; i++) { 9946 subprog_start = subprog_end; 9947 subprog_end = env->subprog_info[i + 1].start; 9948 9949 len = subprog_end - subprog_start; 9950 /* BPF_PROG_RUN doesn't call subprogs directly, 9951 * hence main prog stats include the runtime of subprogs. 9952 * subprogs don't have IDs and not reachable via prog_get_next_id 9953 * func[i]->aux->stats will never be accessed and stays NULL 9954 */ 9955 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 9956 if (!func[i]) 9957 goto out_free; 9958 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 9959 len * sizeof(struct bpf_insn)); 9960 func[i]->type = prog->type; 9961 func[i]->len = len; 9962 if (bpf_prog_calc_tag(func[i])) 9963 goto out_free; 9964 func[i]->is_func = 1; 9965 func[i]->aux->func_idx = i; 9966 /* the btf and func_info will be freed only at prog->aux */ 9967 func[i]->aux->btf = prog->aux->btf; 9968 func[i]->aux->func_info = prog->aux->func_info; 9969 9970 /* Use bpf_prog_F_tag to indicate functions in stack traces. 9971 * Long term would need debug info to populate names 9972 */ 9973 func[i]->aux->name[0] = 'F'; 9974 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; 9975 func[i]->jit_requested = 1; 9976 func[i]->aux->linfo = prog->aux->linfo; 9977 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 9978 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 9979 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 9980 num_exentries = 0; 9981 insn = func[i]->insnsi; 9982 for (j = 0; j < func[i]->len; j++, insn++) { 9983 if (BPF_CLASS(insn->code) == BPF_LDX && 9984 BPF_MODE(insn->code) == BPF_PROBE_MEM) 9985 num_exentries++; 9986 } 9987 func[i]->aux->num_exentries = num_exentries; 9988 func[i] = bpf_int_jit_compile(func[i]); 9989 if (!func[i]->jited) { 9990 err = -ENOTSUPP; 9991 goto out_free; 9992 } 9993 cond_resched(); 9994 } 9995 /* at this point all bpf functions were successfully JITed 9996 * now populate all bpf_calls with correct addresses and 9997 * run last pass of JIT 9998 */ 9999 for (i = 0; i < env->subprog_cnt; i++) { 10000 insn = func[i]->insnsi; 10001 for (j = 0; j < func[i]->len; j++, insn++) { 10002 if (insn->code != (BPF_JMP | BPF_CALL) || 10003 insn->src_reg != BPF_PSEUDO_CALL) 10004 continue; 10005 subprog = insn->off; 10006 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - 10007 __bpf_call_base; 10008 } 10009 10010 /* we use the aux data to keep a list of the start addresses 10011 * of the JITed images for each function in the program 10012 * 10013 * for some architectures, such as powerpc64, the imm field 10014 * might not be large enough to hold the offset of the start 10015 * address of the callee's JITed image from __bpf_call_base 10016 * 10017 * in such cases, we can lookup the start address of a callee 10018 * by using its subprog id, available from the off field of 10019 * the call instruction, as an index for this list 10020 */ 10021 func[i]->aux->func = func; 10022 func[i]->aux->func_cnt = env->subprog_cnt; 10023 } 10024 for (i = 0; i < env->subprog_cnt; i++) { 10025 old_bpf_func = func[i]->bpf_func; 10026 tmp = bpf_int_jit_compile(func[i]); 10027 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 10028 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 10029 err = -ENOTSUPP; 10030 goto out_free; 10031 } 10032 cond_resched(); 10033 } 10034 10035 /* finally lock prog and jit images for all functions and 10036 * populate kallsysm 10037 */ 10038 for (i = 0; i < env->subprog_cnt; i++) { 10039 bpf_prog_lock_ro(func[i]); 10040 bpf_prog_kallsyms_add(func[i]); 10041 } 10042 10043 /* Last step: make now unused interpreter insns from main 10044 * prog consistent for later dump requests, so they can 10045 * later look the same as if they were interpreted only. 10046 */ 10047 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 10048 if (insn->code != (BPF_JMP | BPF_CALL) || 10049 insn->src_reg != BPF_PSEUDO_CALL) 10050 continue; 10051 insn->off = env->insn_aux_data[i].call_imm; 10052 subprog = find_subprog(env, i + insn->off + 1); 10053 insn->imm = subprog; 10054 } 10055 10056 prog->jited = 1; 10057 prog->bpf_func = func[0]->bpf_func; 10058 prog->aux->func = func; 10059 prog->aux->func_cnt = env->subprog_cnt; 10060 bpf_prog_free_unused_jited_linfo(prog); 10061 return 0; 10062 out_free: 10063 for (i = 0; i < env->subprog_cnt; i++) 10064 if (func[i]) 10065 bpf_jit_free(func[i]); 10066 kfree(func); 10067 out_undo_insn: 10068 /* cleanup main prog to be interpreted */ 10069 prog->jit_requested = 0; 10070 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 10071 if (insn->code != (BPF_JMP | BPF_CALL) || 10072 insn->src_reg != BPF_PSEUDO_CALL) 10073 continue; 10074 insn->off = 0; 10075 insn->imm = env->insn_aux_data[i].call_imm; 10076 } 10077 bpf_prog_free_jited_linfo(prog); 10078 return err; 10079 } 10080 10081 static int fixup_call_args(struct bpf_verifier_env *env) 10082 { 10083 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 10084 struct bpf_prog *prog = env->prog; 10085 struct bpf_insn *insn = prog->insnsi; 10086 int i, depth; 10087 #endif 10088 int err = 0; 10089 10090 if (env->prog->jit_requested && 10091 !bpf_prog_is_dev_bound(env->prog->aux)) { 10092 err = jit_subprogs(env); 10093 if (err == 0) 10094 return 0; 10095 if (err == -EFAULT) 10096 return err; 10097 } 10098 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 10099 for (i = 0; i < prog->len; i++, insn++) { 10100 if (insn->code != (BPF_JMP | BPF_CALL) || 10101 insn->src_reg != BPF_PSEUDO_CALL) 10102 continue; 10103 depth = get_callee_stack_depth(env, insn, i); 10104 if (depth < 0) 10105 return depth; 10106 bpf_patch_call_args(insn, depth); 10107 } 10108 err = 0; 10109 #endif 10110 return err; 10111 } 10112 10113 /* fixup insn->imm field of bpf_call instructions 10114 * and inline eligible helpers as explicit sequence of BPF instructions 10115 * 10116 * this function is called after eBPF program passed verification 10117 */ 10118 static int fixup_bpf_calls(struct bpf_verifier_env *env) 10119 { 10120 struct bpf_prog *prog = env->prog; 10121 bool expect_blinding = bpf_jit_blinding_enabled(prog); 10122 struct bpf_insn *insn = prog->insnsi; 10123 const struct bpf_func_proto *fn; 10124 const int insn_cnt = prog->len; 10125 const struct bpf_map_ops *ops; 10126 struct bpf_insn_aux_data *aux; 10127 struct bpf_insn insn_buf[16]; 10128 struct bpf_prog *new_prog; 10129 struct bpf_map *map_ptr; 10130 int i, ret, cnt, delta = 0; 10131 10132 for (i = 0; i < insn_cnt; i++, insn++) { 10133 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 10134 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 10135 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 10136 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 10137 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 10138 struct bpf_insn mask_and_div[] = { 10139 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 10140 /* Rx div 0 -> 0 */ 10141 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), 10142 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 10143 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 10144 *insn, 10145 }; 10146 struct bpf_insn mask_and_mod[] = { 10147 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 10148 /* Rx mod 0 -> Rx */ 10149 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), 10150 *insn, 10151 }; 10152 struct bpf_insn *patchlet; 10153 10154 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 10155 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 10156 patchlet = mask_and_div + (is64 ? 1 : 0); 10157 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); 10158 } else { 10159 patchlet = mask_and_mod + (is64 ? 1 : 0); 10160 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); 10161 } 10162 10163 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 10164 if (!new_prog) 10165 return -ENOMEM; 10166 10167 delta += cnt - 1; 10168 env->prog = prog = new_prog; 10169 insn = new_prog->insnsi + i + delta; 10170 continue; 10171 } 10172 10173 if (BPF_CLASS(insn->code) == BPF_LD && 10174 (BPF_MODE(insn->code) == BPF_ABS || 10175 BPF_MODE(insn->code) == BPF_IND)) { 10176 cnt = env->ops->gen_ld_abs(insn, insn_buf); 10177 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 10178 verbose(env, "bpf verifier is misconfigured\n"); 10179 return -EINVAL; 10180 } 10181 10182 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 10183 if (!new_prog) 10184 return -ENOMEM; 10185 10186 delta += cnt - 1; 10187 env->prog = prog = new_prog; 10188 insn = new_prog->insnsi + i + delta; 10189 continue; 10190 } 10191 10192 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 10193 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 10194 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 10195 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 10196 struct bpf_insn insn_buf[16]; 10197 struct bpf_insn *patch = &insn_buf[0]; 10198 bool issrc, isneg; 10199 u32 off_reg; 10200 10201 aux = &env->insn_aux_data[i + delta]; 10202 if (!aux->alu_state || 10203 aux->alu_state == BPF_ALU_NON_POINTER) 10204 continue; 10205 10206 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 10207 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 10208 BPF_ALU_SANITIZE_SRC; 10209 10210 off_reg = issrc ? insn->src_reg : insn->dst_reg; 10211 if (isneg) 10212 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 10213 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); 10214 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 10215 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 10216 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 10217 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 10218 if (issrc) { 10219 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, 10220 off_reg); 10221 insn->src_reg = BPF_REG_AX; 10222 } else { 10223 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, 10224 BPF_REG_AX); 10225 } 10226 if (isneg) 10227 insn->code = insn->code == code_add ? 10228 code_sub : code_add; 10229 *patch++ = *insn; 10230 if (issrc && isneg) 10231 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 10232 cnt = patch - insn_buf; 10233 10234 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 10235 if (!new_prog) 10236 return -ENOMEM; 10237 10238 delta += cnt - 1; 10239 env->prog = prog = new_prog; 10240 insn = new_prog->insnsi + i + delta; 10241 continue; 10242 } 10243 10244 if (insn->code != (BPF_JMP | BPF_CALL)) 10245 continue; 10246 if (insn->src_reg == BPF_PSEUDO_CALL) 10247 continue; 10248 10249 if (insn->imm == BPF_FUNC_get_route_realm) 10250 prog->dst_needed = 1; 10251 if (insn->imm == BPF_FUNC_get_prandom_u32) 10252 bpf_user_rnd_init_once(); 10253 if (insn->imm == BPF_FUNC_override_return) 10254 prog->kprobe_override = 1; 10255 if (insn->imm == BPF_FUNC_tail_call) { 10256 /* If we tail call into other programs, we 10257 * cannot make any assumptions since they can 10258 * be replaced dynamically during runtime in 10259 * the program array. 10260 */ 10261 prog->cb_access = 1; 10262 env->prog->aux->stack_depth = MAX_BPF_STACK; 10263 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; 10264 10265 /* mark bpf_tail_call as different opcode to avoid 10266 * conditional branch in the interpeter for every normal 10267 * call and to prevent accidental JITing by JIT compiler 10268 * that doesn't support bpf_tail_call yet 10269 */ 10270 insn->imm = 0; 10271 insn->code = BPF_JMP | BPF_TAIL_CALL; 10272 10273 aux = &env->insn_aux_data[i + delta]; 10274 if (env->bpf_capable && !expect_blinding && 10275 prog->jit_requested && 10276 !bpf_map_key_poisoned(aux) && 10277 !bpf_map_ptr_poisoned(aux) && 10278 !bpf_map_ptr_unpriv(aux)) { 10279 struct bpf_jit_poke_descriptor desc = { 10280 .reason = BPF_POKE_REASON_TAIL_CALL, 10281 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 10282 .tail_call.key = bpf_map_key_immediate(aux), 10283 }; 10284 10285 ret = bpf_jit_add_poke_descriptor(prog, &desc); 10286 if (ret < 0) { 10287 verbose(env, "adding tail call poke descriptor failed\n"); 10288 return ret; 10289 } 10290 10291 insn->imm = ret + 1; 10292 continue; 10293 } 10294 10295 if (!bpf_map_ptr_unpriv(aux)) 10296 continue; 10297 10298 /* instead of changing every JIT dealing with tail_call 10299 * emit two extra insns: 10300 * if (index >= max_entries) goto out; 10301 * index &= array->index_mask; 10302 * to avoid out-of-bounds cpu speculation 10303 */ 10304 if (bpf_map_ptr_poisoned(aux)) { 10305 verbose(env, "tail_call abusing map_ptr\n"); 10306 return -EINVAL; 10307 } 10308 10309 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 10310 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 10311 map_ptr->max_entries, 2); 10312 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 10313 container_of(map_ptr, 10314 struct bpf_array, 10315 map)->index_mask); 10316 insn_buf[2] = *insn; 10317 cnt = 3; 10318 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 10319 if (!new_prog) 10320 return -ENOMEM; 10321 10322 delta += cnt - 1; 10323 env->prog = prog = new_prog; 10324 insn = new_prog->insnsi + i + delta; 10325 continue; 10326 } 10327 10328 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 10329 * and other inlining handlers are currently limited to 64 bit 10330 * only. 10331 */ 10332 if (prog->jit_requested && BITS_PER_LONG == 64 && 10333 (insn->imm == BPF_FUNC_map_lookup_elem || 10334 insn->imm == BPF_FUNC_map_update_elem || 10335 insn->imm == BPF_FUNC_map_delete_elem || 10336 insn->imm == BPF_FUNC_map_push_elem || 10337 insn->imm == BPF_FUNC_map_pop_elem || 10338 insn->imm == BPF_FUNC_map_peek_elem)) { 10339 aux = &env->insn_aux_data[i + delta]; 10340 if (bpf_map_ptr_poisoned(aux)) 10341 goto patch_call_imm; 10342 10343 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 10344 ops = map_ptr->ops; 10345 if (insn->imm == BPF_FUNC_map_lookup_elem && 10346 ops->map_gen_lookup) { 10347 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 10348 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 10349 verbose(env, "bpf verifier is misconfigured\n"); 10350 return -EINVAL; 10351 } 10352 10353 new_prog = bpf_patch_insn_data(env, i + delta, 10354 insn_buf, cnt); 10355 if (!new_prog) 10356 return -ENOMEM; 10357 10358 delta += cnt - 1; 10359 env->prog = prog = new_prog; 10360 insn = new_prog->insnsi + i + delta; 10361 continue; 10362 } 10363 10364 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 10365 (void *(*)(struct bpf_map *map, void *key))NULL)); 10366 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 10367 (int (*)(struct bpf_map *map, void *key))NULL)); 10368 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 10369 (int (*)(struct bpf_map *map, void *key, void *value, 10370 u64 flags))NULL)); 10371 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 10372 (int (*)(struct bpf_map *map, void *value, 10373 u64 flags))NULL)); 10374 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 10375 (int (*)(struct bpf_map *map, void *value))NULL)); 10376 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 10377 (int (*)(struct bpf_map *map, void *value))NULL)); 10378 10379 switch (insn->imm) { 10380 case BPF_FUNC_map_lookup_elem: 10381 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - 10382 __bpf_call_base; 10383 continue; 10384 case BPF_FUNC_map_update_elem: 10385 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - 10386 __bpf_call_base; 10387 continue; 10388 case BPF_FUNC_map_delete_elem: 10389 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - 10390 __bpf_call_base; 10391 continue; 10392 case BPF_FUNC_map_push_elem: 10393 insn->imm = BPF_CAST_CALL(ops->map_push_elem) - 10394 __bpf_call_base; 10395 continue; 10396 case BPF_FUNC_map_pop_elem: 10397 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - 10398 __bpf_call_base; 10399 continue; 10400 case BPF_FUNC_map_peek_elem: 10401 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - 10402 __bpf_call_base; 10403 continue; 10404 } 10405 10406 goto patch_call_imm; 10407 } 10408 10409 if (prog->jit_requested && BITS_PER_LONG == 64 && 10410 insn->imm == BPF_FUNC_jiffies64) { 10411 struct bpf_insn ld_jiffies_addr[2] = { 10412 BPF_LD_IMM64(BPF_REG_0, 10413 (unsigned long)&jiffies), 10414 }; 10415 10416 insn_buf[0] = ld_jiffies_addr[0]; 10417 insn_buf[1] = ld_jiffies_addr[1]; 10418 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 10419 BPF_REG_0, 0); 10420 cnt = 3; 10421 10422 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 10423 cnt); 10424 if (!new_prog) 10425 return -ENOMEM; 10426 10427 delta += cnt - 1; 10428 env->prog = prog = new_prog; 10429 insn = new_prog->insnsi + i + delta; 10430 continue; 10431 } 10432 10433 patch_call_imm: 10434 fn = env->ops->get_func_proto(insn->imm, env->prog); 10435 /* all functions that have prototype and verifier allowed 10436 * programs to call them, must be real in-kernel functions 10437 */ 10438 if (!fn->func) { 10439 verbose(env, 10440 "kernel subsystem misconfigured func %s#%d\n", 10441 func_id_name(insn->imm), insn->imm); 10442 return -EFAULT; 10443 } 10444 insn->imm = fn->func - __bpf_call_base; 10445 } 10446 10447 /* Since poke tab is now finalized, publish aux to tracker. */ 10448 for (i = 0; i < prog->aux->size_poke_tab; i++) { 10449 map_ptr = prog->aux->poke_tab[i].tail_call.map; 10450 if (!map_ptr->ops->map_poke_track || 10451 !map_ptr->ops->map_poke_untrack || 10452 !map_ptr->ops->map_poke_run) { 10453 verbose(env, "bpf verifier is misconfigured\n"); 10454 return -EINVAL; 10455 } 10456 10457 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 10458 if (ret < 0) { 10459 verbose(env, "tracking tail call prog failed\n"); 10460 return ret; 10461 } 10462 } 10463 10464 return 0; 10465 } 10466 10467 static void free_states(struct bpf_verifier_env *env) 10468 { 10469 struct bpf_verifier_state_list *sl, *sln; 10470 int i; 10471 10472 sl = env->free_list; 10473 while (sl) { 10474 sln = sl->next; 10475 free_verifier_state(&sl->state, false); 10476 kfree(sl); 10477 sl = sln; 10478 } 10479 env->free_list = NULL; 10480 10481 if (!env->explored_states) 10482 return; 10483 10484 for (i = 0; i < state_htab_size(env); i++) { 10485 sl = env->explored_states[i]; 10486 10487 while (sl) { 10488 sln = sl->next; 10489 free_verifier_state(&sl->state, false); 10490 kfree(sl); 10491 sl = sln; 10492 } 10493 env->explored_states[i] = NULL; 10494 } 10495 } 10496 10497 /* The verifier is using insn_aux_data[] to store temporary data during 10498 * verification and to store information for passes that run after the 10499 * verification like dead code sanitization. do_check_common() for subprogram N 10500 * may analyze many other subprograms. sanitize_insn_aux_data() clears all 10501 * temporary data after do_check_common() finds that subprogram N cannot be 10502 * verified independently. pass_cnt counts the number of times 10503 * do_check_common() was run and insn->aux->seen tells the pass number 10504 * insn_aux_data was touched. These variables are compared to clear temporary 10505 * data from failed pass. For testing and experiments do_check_common() can be 10506 * run multiple times even when prior attempt to verify is unsuccessful. 10507 */ 10508 static void sanitize_insn_aux_data(struct bpf_verifier_env *env) 10509 { 10510 struct bpf_insn *insn = env->prog->insnsi; 10511 struct bpf_insn_aux_data *aux; 10512 int i, class; 10513 10514 for (i = 0; i < env->prog->len; i++) { 10515 class = BPF_CLASS(insn[i].code); 10516 if (class != BPF_LDX && class != BPF_STX) 10517 continue; 10518 aux = &env->insn_aux_data[i]; 10519 if (aux->seen != env->pass_cnt) 10520 continue; 10521 memset(aux, 0, offsetof(typeof(*aux), orig_idx)); 10522 } 10523 } 10524 10525 static int do_check_common(struct bpf_verifier_env *env, int subprog) 10526 { 10527 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 10528 struct bpf_verifier_state *state; 10529 struct bpf_reg_state *regs; 10530 int ret, i; 10531 10532 env->prev_linfo = NULL; 10533 env->pass_cnt++; 10534 10535 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 10536 if (!state) 10537 return -ENOMEM; 10538 state->curframe = 0; 10539 state->speculative = false; 10540 state->branches = 1; 10541 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 10542 if (!state->frame[0]) { 10543 kfree(state); 10544 return -ENOMEM; 10545 } 10546 env->cur_state = state; 10547 init_func_state(env, state->frame[0], 10548 BPF_MAIN_FUNC /* callsite */, 10549 0 /* frameno */, 10550 subprog); 10551 10552 regs = state->frame[state->curframe]->regs; 10553 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 10554 ret = btf_prepare_func_args(env, subprog, regs); 10555 if (ret) 10556 goto out; 10557 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 10558 if (regs[i].type == PTR_TO_CTX) 10559 mark_reg_known_zero(env, regs, i); 10560 else if (regs[i].type == SCALAR_VALUE) 10561 mark_reg_unknown(env, regs, i); 10562 } 10563 } else { 10564 /* 1st arg to a function */ 10565 regs[BPF_REG_1].type = PTR_TO_CTX; 10566 mark_reg_known_zero(env, regs, BPF_REG_1); 10567 ret = btf_check_func_arg_match(env, subprog, regs); 10568 if (ret == -EFAULT) 10569 /* unlikely verifier bug. abort. 10570 * ret == 0 and ret < 0 are sadly acceptable for 10571 * main() function due to backward compatibility. 10572 * Like socket filter program may be written as: 10573 * int bpf_prog(struct pt_regs *ctx) 10574 * and never dereference that ctx in the program. 10575 * 'struct pt_regs' is a type mismatch for socket 10576 * filter that should be using 'struct __sk_buff'. 10577 */ 10578 goto out; 10579 } 10580 10581 ret = do_check(env); 10582 out: 10583 /* check for NULL is necessary, since cur_state can be freed inside 10584 * do_check() under memory pressure. 10585 */ 10586 if (env->cur_state) { 10587 free_verifier_state(env->cur_state, true); 10588 env->cur_state = NULL; 10589 } 10590 while (!pop_stack(env, NULL, NULL, false)); 10591 if (!ret && pop_log) 10592 bpf_vlog_reset(&env->log, 0); 10593 free_states(env); 10594 if (ret) 10595 /* clean aux data in case subprog was rejected */ 10596 sanitize_insn_aux_data(env); 10597 return ret; 10598 } 10599 10600 /* Verify all global functions in a BPF program one by one based on their BTF. 10601 * All global functions must pass verification. Otherwise the whole program is rejected. 10602 * Consider: 10603 * int bar(int); 10604 * int foo(int f) 10605 * { 10606 * return bar(f); 10607 * } 10608 * int bar(int b) 10609 * { 10610 * ... 10611 * } 10612 * foo() will be verified first for R1=any_scalar_value. During verification it 10613 * will be assumed that bar() already verified successfully and call to bar() 10614 * from foo() will be checked for type match only. Later bar() will be verified 10615 * independently to check that it's safe for R1=any_scalar_value. 10616 */ 10617 static int do_check_subprogs(struct bpf_verifier_env *env) 10618 { 10619 struct bpf_prog_aux *aux = env->prog->aux; 10620 int i, ret; 10621 10622 if (!aux->func_info) 10623 return 0; 10624 10625 for (i = 1; i < env->subprog_cnt; i++) { 10626 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 10627 continue; 10628 env->insn_idx = env->subprog_info[i].start; 10629 WARN_ON_ONCE(env->insn_idx == 0); 10630 ret = do_check_common(env, i); 10631 if (ret) { 10632 return ret; 10633 } else if (env->log.level & BPF_LOG_LEVEL) { 10634 verbose(env, 10635 "Func#%d is safe for any args that match its prototype\n", 10636 i); 10637 } 10638 } 10639 return 0; 10640 } 10641 10642 static int do_check_main(struct bpf_verifier_env *env) 10643 { 10644 int ret; 10645 10646 env->insn_idx = 0; 10647 ret = do_check_common(env, 0); 10648 if (!ret) 10649 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 10650 return ret; 10651 } 10652 10653 10654 static void print_verification_stats(struct bpf_verifier_env *env) 10655 { 10656 int i; 10657 10658 if (env->log.level & BPF_LOG_STATS) { 10659 verbose(env, "verification time %lld usec\n", 10660 div_u64(env->verification_time, 1000)); 10661 verbose(env, "stack depth "); 10662 for (i = 0; i < env->subprog_cnt; i++) { 10663 u32 depth = env->subprog_info[i].stack_depth; 10664 10665 verbose(env, "%d", depth); 10666 if (i + 1 < env->subprog_cnt) 10667 verbose(env, "+"); 10668 } 10669 verbose(env, "\n"); 10670 } 10671 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 10672 "total_states %d peak_states %d mark_read %d\n", 10673 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 10674 env->max_states_per_insn, env->total_states, 10675 env->peak_states, env->longest_mark_read_walk); 10676 } 10677 10678 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 10679 { 10680 const struct btf_type *t, *func_proto; 10681 const struct bpf_struct_ops *st_ops; 10682 const struct btf_member *member; 10683 struct bpf_prog *prog = env->prog; 10684 u32 btf_id, member_idx; 10685 const char *mname; 10686 10687 btf_id = prog->aux->attach_btf_id; 10688 st_ops = bpf_struct_ops_find(btf_id); 10689 if (!st_ops) { 10690 verbose(env, "attach_btf_id %u is not a supported struct\n", 10691 btf_id); 10692 return -ENOTSUPP; 10693 } 10694 10695 t = st_ops->type; 10696 member_idx = prog->expected_attach_type; 10697 if (member_idx >= btf_type_vlen(t)) { 10698 verbose(env, "attach to invalid member idx %u of struct %s\n", 10699 member_idx, st_ops->name); 10700 return -EINVAL; 10701 } 10702 10703 member = &btf_type_member(t)[member_idx]; 10704 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 10705 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 10706 NULL); 10707 if (!func_proto) { 10708 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 10709 mname, member_idx, st_ops->name); 10710 return -EINVAL; 10711 } 10712 10713 if (st_ops->check_member) { 10714 int err = st_ops->check_member(t, member); 10715 10716 if (err) { 10717 verbose(env, "attach to unsupported member %s of struct %s\n", 10718 mname, st_ops->name); 10719 return err; 10720 } 10721 } 10722 10723 prog->aux->attach_func_proto = func_proto; 10724 prog->aux->attach_func_name = mname; 10725 env->ops = st_ops->verifier_ops; 10726 10727 return 0; 10728 } 10729 #define SECURITY_PREFIX "security_" 10730 10731 static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr) 10732 { 10733 if (within_error_injection_list(addr) || 10734 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, 10735 sizeof(SECURITY_PREFIX) - 1)) 10736 return 0; 10737 10738 return -EINVAL; 10739 } 10740 10741 static int check_attach_btf_id(struct bpf_verifier_env *env) 10742 { 10743 struct bpf_prog *prog = env->prog; 10744 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 10745 struct bpf_prog *tgt_prog = prog->aux->linked_prog; 10746 u32 btf_id = prog->aux->attach_btf_id; 10747 const char prefix[] = "btf_trace_"; 10748 struct btf_func_model fmodel; 10749 int ret = 0, subprog = -1, i; 10750 struct bpf_trampoline *tr; 10751 const struct btf_type *t; 10752 bool conservative = true; 10753 const char *tname; 10754 struct btf *btf; 10755 long addr; 10756 u64 key; 10757 10758 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 10759 return check_struct_ops_btf_id(env); 10760 10761 if (prog->type != BPF_PROG_TYPE_TRACING && 10762 prog->type != BPF_PROG_TYPE_LSM && 10763 !prog_extension) 10764 return 0; 10765 10766 if (!btf_id) { 10767 verbose(env, "Tracing programs must provide btf_id\n"); 10768 return -EINVAL; 10769 } 10770 btf = bpf_prog_get_target_btf(prog); 10771 if (!btf) { 10772 verbose(env, 10773 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 10774 return -EINVAL; 10775 } 10776 t = btf_type_by_id(btf, btf_id); 10777 if (!t) { 10778 verbose(env, "attach_btf_id %u is invalid\n", btf_id); 10779 return -EINVAL; 10780 } 10781 tname = btf_name_by_offset(btf, t->name_off); 10782 if (!tname) { 10783 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id); 10784 return -EINVAL; 10785 } 10786 if (tgt_prog) { 10787 struct bpf_prog_aux *aux = tgt_prog->aux; 10788 10789 for (i = 0; i < aux->func_info_cnt; i++) 10790 if (aux->func_info[i].type_id == btf_id) { 10791 subprog = i; 10792 break; 10793 } 10794 if (subprog == -1) { 10795 verbose(env, "Subprog %s doesn't exist\n", tname); 10796 return -EINVAL; 10797 } 10798 conservative = aux->func_info_aux[subprog].unreliable; 10799 if (prog_extension) { 10800 if (conservative) { 10801 verbose(env, 10802 "Cannot replace static functions\n"); 10803 return -EINVAL; 10804 } 10805 if (!prog->jit_requested) { 10806 verbose(env, 10807 "Extension programs should be JITed\n"); 10808 return -EINVAL; 10809 } 10810 env->ops = bpf_verifier_ops[tgt_prog->type]; 10811 prog->expected_attach_type = tgt_prog->expected_attach_type; 10812 } 10813 if (!tgt_prog->jited) { 10814 verbose(env, "Can attach to only JITed progs\n"); 10815 return -EINVAL; 10816 } 10817 if (tgt_prog->type == prog->type) { 10818 /* Cannot fentry/fexit another fentry/fexit program. 10819 * Cannot attach program extension to another extension. 10820 * It's ok to attach fentry/fexit to extension program. 10821 */ 10822 verbose(env, "Cannot recursively attach\n"); 10823 return -EINVAL; 10824 } 10825 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 10826 prog_extension && 10827 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 10828 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 10829 /* Program extensions can extend all program types 10830 * except fentry/fexit. The reason is the following. 10831 * The fentry/fexit programs are used for performance 10832 * analysis, stats and can be attached to any program 10833 * type except themselves. When extension program is 10834 * replacing XDP function it is necessary to allow 10835 * performance analysis of all functions. Both original 10836 * XDP program and its program extension. Hence 10837 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 10838 * allowed. If extending of fentry/fexit was allowed it 10839 * would be possible to create long call chain 10840 * fentry->extension->fentry->extension beyond 10841 * reasonable stack size. Hence extending fentry is not 10842 * allowed. 10843 */ 10844 verbose(env, "Cannot extend fentry/fexit\n"); 10845 return -EINVAL; 10846 } 10847 key = ((u64)aux->id) << 32 | btf_id; 10848 } else { 10849 if (prog_extension) { 10850 verbose(env, "Cannot replace kernel functions\n"); 10851 return -EINVAL; 10852 } 10853 key = btf_id; 10854 } 10855 10856 switch (prog->expected_attach_type) { 10857 case BPF_TRACE_RAW_TP: 10858 if (tgt_prog) { 10859 verbose(env, 10860 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 10861 return -EINVAL; 10862 } 10863 if (!btf_type_is_typedef(t)) { 10864 verbose(env, "attach_btf_id %u is not a typedef\n", 10865 btf_id); 10866 return -EINVAL; 10867 } 10868 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 10869 verbose(env, "attach_btf_id %u points to wrong type name %s\n", 10870 btf_id, tname); 10871 return -EINVAL; 10872 } 10873 tname += sizeof(prefix) - 1; 10874 t = btf_type_by_id(btf, t->type); 10875 if (!btf_type_is_ptr(t)) 10876 /* should never happen in valid vmlinux build */ 10877 return -EINVAL; 10878 t = btf_type_by_id(btf, t->type); 10879 if (!btf_type_is_func_proto(t)) 10880 /* should never happen in valid vmlinux build */ 10881 return -EINVAL; 10882 10883 /* remember two read only pointers that are valid for 10884 * the life time of the kernel 10885 */ 10886 prog->aux->attach_func_name = tname; 10887 prog->aux->attach_func_proto = t; 10888 prog->aux->attach_btf_trace = true; 10889 return 0; 10890 case BPF_TRACE_ITER: 10891 if (!btf_type_is_func(t)) { 10892 verbose(env, "attach_btf_id %u is not a function\n", 10893 btf_id); 10894 return -EINVAL; 10895 } 10896 t = btf_type_by_id(btf, t->type); 10897 if (!btf_type_is_func_proto(t)) 10898 return -EINVAL; 10899 prog->aux->attach_func_name = tname; 10900 prog->aux->attach_func_proto = t; 10901 if (!bpf_iter_prog_supported(prog)) 10902 return -EINVAL; 10903 ret = btf_distill_func_proto(&env->log, btf, t, 10904 tname, &fmodel); 10905 return ret; 10906 default: 10907 if (!prog_extension) 10908 return -EINVAL; 10909 /* fallthrough */ 10910 case BPF_MODIFY_RETURN: 10911 case BPF_LSM_MAC: 10912 case BPF_TRACE_FENTRY: 10913 case BPF_TRACE_FEXIT: 10914 prog->aux->attach_func_name = tname; 10915 if (prog->type == BPF_PROG_TYPE_LSM) { 10916 ret = bpf_lsm_verify_prog(&env->log, prog); 10917 if (ret < 0) 10918 return ret; 10919 } 10920 10921 if (!btf_type_is_func(t)) { 10922 verbose(env, "attach_btf_id %u is not a function\n", 10923 btf_id); 10924 return -EINVAL; 10925 } 10926 if (prog_extension && 10927 btf_check_type_match(env, prog, btf, t)) 10928 return -EINVAL; 10929 t = btf_type_by_id(btf, t->type); 10930 if (!btf_type_is_func_proto(t)) 10931 return -EINVAL; 10932 tr = bpf_trampoline_lookup(key); 10933 if (!tr) 10934 return -ENOMEM; 10935 /* t is either vmlinux type or another program's type */ 10936 prog->aux->attach_func_proto = t; 10937 mutex_lock(&tr->mutex); 10938 if (tr->func.addr) { 10939 prog->aux->trampoline = tr; 10940 goto out; 10941 } 10942 if (tgt_prog && conservative) { 10943 prog->aux->attach_func_proto = NULL; 10944 t = NULL; 10945 } 10946 ret = btf_distill_func_proto(&env->log, btf, t, 10947 tname, &tr->func.model); 10948 if (ret < 0) 10949 goto out; 10950 if (tgt_prog) { 10951 if (subprog == 0) 10952 addr = (long) tgt_prog->bpf_func; 10953 else 10954 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 10955 } else { 10956 addr = kallsyms_lookup_name(tname); 10957 if (!addr) { 10958 verbose(env, 10959 "The address of function %s cannot be found\n", 10960 tname); 10961 ret = -ENOENT; 10962 goto out; 10963 } 10964 } 10965 10966 if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 10967 ret = check_attach_modify_return(prog, addr); 10968 if (ret) 10969 verbose(env, "%s() is not modifiable\n", 10970 prog->aux->attach_func_name); 10971 } 10972 10973 if (ret) 10974 goto out; 10975 tr->func.addr = (void *)addr; 10976 prog->aux->trampoline = tr; 10977 out: 10978 mutex_unlock(&tr->mutex); 10979 if (ret) 10980 bpf_trampoline_put(tr); 10981 return ret; 10982 } 10983 } 10984 10985 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, 10986 union bpf_attr __user *uattr) 10987 { 10988 u64 start_time = ktime_get_ns(); 10989 struct bpf_verifier_env *env; 10990 struct bpf_verifier_log *log; 10991 int i, len, ret = -EINVAL; 10992 bool is_priv; 10993 10994 /* no program is valid */ 10995 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 10996 return -EINVAL; 10997 10998 /* 'struct bpf_verifier_env' can be global, but since it's not small, 10999 * allocate/free it every time bpf_check() is called 11000 */ 11001 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 11002 if (!env) 11003 return -ENOMEM; 11004 log = &env->log; 11005 11006 len = (*prog)->len; 11007 env->insn_aux_data = 11008 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 11009 ret = -ENOMEM; 11010 if (!env->insn_aux_data) 11011 goto err_free_env; 11012 for (i = 0; i < len; i++) 11013 env->insn_aux_data[i].orig_idx = i; 11014 env->prog = *prog; 11015 env->ops = bpf_verifier_ops[env->prog->type]; 11016 is_priv = bpf_capable(); 11017 11018 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 11019 mutex_lock(&bpf_verifier_lock); 11020 if (!btf_vmlinux) 11021 btf_vmlinux = btf_parse_vmlinux(); 11022 mutex_unlock(&bpf_verifier_lock); 11023 } 11024 11025 /* grab the mutex to protect few globals used by verifier */ 11026 if (!is_priv) 11027 mutex_lock(&bpf_verifier_lock); 11028 11029 if (attr->log_level || attr->log_buf || attr->log_size) { 11030 /* user requested verbose verifier output 11031 * and supplied buffer to store the verification trace 11032 */ 11033 log->level = attr->log_level; 11034 log->ubuf = (char __user *) (unsigned long) attr->log_buf; 11035 log->len_total = attr->log_size; 11036 11037 ret = -EINVAL; 11038 /* log attributes have to be sane */ 11039 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || 11040 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) 11041 goto err_unlock; 11042 } 11043 11044 if (IS_ERR(btf_vmlinux)) { 11045 /* Either gcc or pahole or kernel are broken. */ 11046 verbose(env, "in-kernel BTF is malformed\n"); 11047 ret = PTR_ERR(btf_vmlinux); 11048 goto skip_full_check; 11049 } 11050 11051 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 11052 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 11053 env->strict_alignment = true; 11054 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 11055 env->strict_alignment = false; 11056 11057 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); 11058 env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); 11059 env->bypass_spec_v1 = bpf_bypass_spec_v1(); 11060 env->bypass_spec_v4 = bpf_bypass_spec_v4(); 11061 env->bpf_capable = bpf_capable(); 11062 11063 if (is_priv) 11064 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 11065 11066 ret = replace_map_fd_with_map_ptr(env); 11067 if (ret < 0) 11068 goto skip_full_check; 11069 11070 if (bpf_prog_is_dev_bound(env->prog->aux)) { 11071 ret = bpf_prog_offload_verifier_prep(env->prog); 11072 if (ret) 11073 goto skip_full_check; 11074 } 11075 11076 env->explored_states = kvcalloc(state_htab_size(env), 11077 sizeof(struct bpf_verifier_state_list *), 11078 GFP_USER); 11079 ret = -ENOMEM; 11080 if (!env->explored_states) 11081 goto skip_full_check; 11082 11083 ret = check_subprogs(env); 11084 if (ret < 0) 11085 goto skip_full_check; 11086 11087 ret = check_btf_info(env, attr, uattr); 11088 if (ret < 0) 11089 goto skip_full_check; 11090 11091 ret = check_attach_btf_id(env); 11092 if (ret) 11093 goto skip_full_check; 11094 11095 ret = check_cfg(env); 11096 if (ret < 0) 11097 goto skip_full_check; 11098 11099 ret = do_check_subprogs(env); 11100 ret = ret ?: do_check_main(env); 11101 11102 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) 11103 ret = bpf_prog_offload_finalize(env); 11104 11105 skip_full_check: 11106 kvfree(env->explored_states); 11107 11108 if (ret == 0) 11109 ret = check_max_stack_depth(env); 11110 11111 /* instruction rewrites happen after this point */ 11112 if (is_priv) { 11113 if (ret == 0) 11114 opt_hard_wire_dead_code_branches(env); 11115 if (ret == 0) 11116 ret = opt_remove_dead_code(env); 11117 if (ret == 0) 11118 ret = opt_remove_nops(env); 11119 } else { 11120 if (ret == 0) 11121 sanitize_dead_code(env); 11122 } 11123 11124 if (ret == 0) 11125 /* program is valid, convert *(u32*)(ctx + off) accesses */ 11126 ret = convert_ctx_accesses(env); 11127 11128 if (ret == 0) 11129 ret = fixup_bpf_calls(env); 11130 11131 /* do 32-bit optimization after insn patching has done so those patched 11132 * insns could be handled correctly. 11133 */ 11134 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { 11135 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 11136 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret 11137 : false; 11138 } 11139 11140 if (ret == 0) 11141 ret = fixup_call_args(env); 11142 11143 env->verification_time = ktime_get_ns() - start_time; 11144 print_verification_stats(env); 11145 11146 if (log->level && bpf_verifier_log_full(log)) 11147 ret = -ENOSPC; 11148 if (log->level && !log->ubuf) { 11149 ret = -EFAULT; 11150 goto err_release_maps; 11151 } 11152 11153 if (ret == 0 && env->used_map_cnt) { 11154 /* if program passed verifier, update used_maps in bpf_prog_info */ 11155 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 11156 sizeof(env->used_maps[0]), 11157 GFP_KERNEL); 11158 11159 if (!env->prog->aux->used_maps) { 11160 ret = -ENOMEM; 11161 goto err_release_maps; 11162 } 11163 11164 memcpy(env->prog->aux->used_maps, env->used_maps, 11165 sizeof(env->used_maps[0]) * env->used_map_cnt); 11166 env->prog->aux->used_map_cnt = env->used_map_cnt; 11167 11168 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 11169 * bpf_ld_imm64 instructions 11170 */ 11171 convert_pseudo_ld_imm64(env); 11172 } 11173 11174 if (ret == 0) 11175 adjust_btf_func(env); 11176 11177 err_release_maps: 11178 if (!env->prog->aux->used_maps) 11179 /* if we didn't copy map pointers into bpf_prog_info, release 11180 * them now. Otherwise free_used_maps() will release them. 11181 */ 11182 release_maps(env); 11183 11184 /* extension progs temporarily inherit the attach_type of their targets 11185 for verification purposes, so set it back to zero before returning 11186 */ 11187 if (env->prog->type == BPF_PROG_TYPE_EXT) 11188 env->prog->expected_attach_type = 0; 11189 11190 *prog = env->prog; 11191 err_unlock: 11192 if (!is_priv) 11193 mutex_unlock(&bpf_verifier_lock); 11194 vfree(env->insn_aux_data); 11195 err_free_env: 11196 kfree(env); 11197 return ret; 11198 } 11199