// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
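 *
 * For example (an illustrative sketch, not an insn from this file), a load
 * like:
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 * is accepted only if R1 holds one of these pointer types and a 4-byte
 * access at offset 0 is within bounds for that type.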
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
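 *
 * An illustrative sketch of the acquire/check/release pattern (helper
 * argument setup and the jump offset <exit> omitted):
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *       // R0 is PTR_TO_SOCKET_OR_NULL with a fresh reference id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, <exit>),
 *       // fall-through branch: R0 is PTR_TO_SOCKET and must be released
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),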
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
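
/* Illustrative sketch of the bit packing above: the map pointer is at least
 * word aligned and BPF_MAP_PTR_POISON keeps bit 0 clear (enforced by the
 * BUILD_BUG_ON), so the unpriv flag can live in bit 0 and round-trips:
 *
 *	bpf_map_ptr_store(aux, map, true);
 *	BPF_MAP_PTR(aux->map_ptr_state);	== map
 *	bpf_map_ptr_unpriv(aux);		== true
 */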

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
	int ref_obj_id;
	int func_id;
	u32 btf_id;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
	       map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
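 *
 * E.g. (per the header comment above) bpf_sk_release() releases the socket
 * reference acquired by bpf_sk_lookup_tcp().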
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}

static bool is_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
	       func_id == BPF_FUNC_sk_lookup_udp ||
	       func_id == BPF_FUNC_skc_lookup_tcp;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
	       func_id == BPF_FUNC_sk_fullsock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL]	= "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

const char *kernel_type_name(u32 id)
{
	return btf_name_by_offset(btf_vmlinux,
				  btf_type_by_id(btf_vmlinux, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent.
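 *
 * A sketch of the expected pairing (illustrative):
 *	id = acquire_reference_state(env, insn_idx);	  // e.g. on sk_lookup
 *	...
 *	err = release_reference_state(cur_func(env), id); // e.g. on sk_release
 * Releasing an id that was never acquired (or was already released) fails
 * with -EINVAL.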
 */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
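 *
 * E.g. (see init_reg_state() below) the frame pointer R10 is set up this
 * way: its var_off is the constant 0, while its pointer identity lives in
 * reg->type and reg->frameno.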
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
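		 * E.g. (illustrative) umin_value = 0xfffffffffffffffd (-3 as
		 * s64) means every value in [umin_value, umax_value] has the
		 * sign bit set, so smin_value may safely be raised to -3.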
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

static void __reg_bound_offset32(struct bpf_reg_state *reg)
{
	u64 mask = 0xffffFFFF;
	struct tnum range = tnum_range(reg->umin_value & mask,
				       reg->umax_value & mask);
	struct tnum lo32 = tnum_cast(reg->var_off, 4);
	struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);

	reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
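	 *
	 * E.g. (illustrative) a 10-insn program with one bpf-to-bpf call
	 * target at insn 6 ends up with starts {0, 6} plus the fake entry
	 * {10}, so subprog i always spans [start[i], start[i + 1]).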
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}

/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}

/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64-bit, otherwise return FALSE.
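 *
 * E.g. (illustrative) BPF_ALU64_REG(BPF_ADD, ...) operates on all 64 bits
 * and yields true, while a BPF_ALU (32-bit) BPF_MOV defines only the low
 * 32 bits of its destination and yields false for DST_OP.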
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{
	u8 code, class, op;

	code = insn->code;
	class = BPF_CLASS(code);
	op = BPF_OP(code);
	if (class == BPF_JMP) {
		/* BPF_EXIT for "main" will reach here. Return TRUE
		 * conservatively.
		 */
		if (op == BPF_EXIT)
			return true;
		if (op == BPF_CALL) {
			/* BPF to BPF call will reach here because of marking
			 * caller saved clobber with DST_OP_NO_MARK for which we
			 * don't care the register def because they are anyway
			 * marked as NOT_INIT already.
			 */
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return false;
			/* Helper call will reach here because of arg type
			 * check, conservatively return TRUE.
			 */
			if (t == SRC_OP)
				return true;

			return false;
		}
	}

	if (class == BPF_ALU64 || class == BPF_JMP ||
	    /* BPF_END always use BPF_ALU class. */
	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
		return true;

	if (class == BPF_ALU || class == BPF_JMP32)
		return false;

	if (class == BPF_LDX) {
		if (t != SRC_OP)
			return BPF_SIZE(code) == BPF_DW;
		/* LDX source must be ptr. */
		return true;
	}

	if (class == BPF_STX) {
		if (reg->type != SCALAR_VALUE)
			return true;
		return BPF_SIZE(code) == BPF_DW;
	}

	if (class == BPF_LD) {
		u8 mode = BPF_MODE(code);

		/* LD_IMM64 */
		if (mode == BPF_IMM)
			return true;

		/* Both LD_IND and LD_ABS return 32-bit data. */
		if (t != SRC_OP)
			return false;

		/* Implicit ctx ptr. */
		if (regno == BPF_REG_6)
			return true;

		/* Explicit source could be any width. */
		return true;
	}

	if (class == BPF_ST)
		/* The only source register for BPF_ST is a ptr. */
		return true;

	/* Conservatively return true at default. */
	return true;
}

/* Return TRUE if INSN doesn't have explicit value define. */
static bool insn_no_def(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	return (class == BPF_JMP || class == BPF_JMP32 ||
		class == BPF_STX || class == BPF_ST);
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}

/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that need precision in the parent verifier state.
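 *
 * E.g. (illustrative) when backtracking "r2 = r6" with r2 set in the mask,
 * the r2 bit is cleared and the r6 bit is set, since r6's value before the
 * insn determines r2's value after it.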
 */
static int backtrack_insn(struct bpf_verifier_env *env, int idx,
			  u32 *reg_mask, u64 *stack_mask)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print	= verbose,
		.private_data	= env,
	};
	struct bpf_insn *insn = env->prog->insnsi + idx;
	u8 class = BPF_CLASS(insn->code);
	u8 opcode = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u32 dreg = 1u << insn->dst_reg;
	u32 sreg = 1u << insn->src_reg;
	u32 spi;

	if (insn->code == 0)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
		verbose(env, "%d: ", idx);
		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
	}

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (!(*reg_mask & dreg))
			return 0;
		if (opcode == BPF_MOV) {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg = sreg
				 * dreg needs precision after this insn
				 * sreg needs precision before this insn
				 */
				*reg_mask &= ~dreg;
				*reg_mask |= sreg;
			} else {
				/* dreg = K
				 * dreg needs precision after this insn.
				 * Corresponding register is already marked
				 * as precise=true in this verifier state.
				 * No further markings in parent are necessary
				 */
				*reg_mask &= ~dreg;
			}
		} else {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg += sreg
				 * both dreg and sreg need precision
				 * before this insn
				 */
				*reg_mask |= sreg;
			} /* else dreg += K
			   * dreg still needs precision before this insn
			   */
		}
	} else if (class == BPF_LDX) {
		if (!(*reg_mask & dreg))
			return 0;
		*reg_mask &= ~dreg;

		/* scalars can only be spilled into stack w/o losing precision.
		 * Load from any other memory can be zero extended.
		 * The desire to keep that precision is already indicated
		 * by 'precise' mark in corresponding register of this state.
		 * No further tracking necessary.
		 */
		if (insn->src_reg != BPF_REG_FP)
			return 0;
		if (BPF_SIZE(insn->code) != BPF_DW)
			return 0;

		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
		 * that [fp - off] slot contains scalar that needs to be
		 * tracked with precision
		 */
		spi = (-insn->off - 1) / BPF_REG_SIZE;
		if (spi >= 64) {
			verbose(env, "BUG spi %d\n", spi);
			WARN_ONCE(1, "verifier backtracking bug");
			return -EFAULT;
		}
		*stack_mask |= 1ull << spi;
	} else if (class == BPF_STX || class == BPF_ST) {
		if (*reg_mask & dreg)
			/* stx & st shouldn't be using _scalar_ dst_reg
			 * to access memory. It means backtracking
			 * encountered a case of pointer subtraction.
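			 * E.g. (illustrative) "r1 -= r2" on two pointers
			 * leaves a scalar r1 that may later be used as a
			 * store base; -ENOTSUPP makes the caller fall back
			 * to mark_all_scalars_precise().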
1595 */ 1596 return -ENOTSUPP; 1597 /* scalars can only be spilled into stack */ 1598 if (insn->dst_reg != BPF_REG_FP) 1599 return 0; 1600 if (BPF_SIZE(insn->code) != BPF_DW) 1601 return 0; 1602 spi = (-insn->off - 1) / BPF_REG_SIZE; 1603 if (spi >= 64) { 1604 verbose(env, "BUG spi %d\n", spi); 1605 WARN_ONCE(1, "verifier backtracking bug"); 1606 return -EFAULT; 1607 } 1608 if (!(*stack_mask & (1ull << spi))) 1609 return 0; 1610 *stack_mask &= ~(1ull << spi); 1611 if (class == BPF_STX) 1612 *reg_mask |= sreg; 1613 } else if (class == BPF_JMP || class == BPF_JMP32) { 1614 if (opcode == BPF_CALL) { 1615 if (insn->src_reg == BPF_PSEUDO_CALL) 1616 return -ENOTSUPP; 1617 /* regular helper call sets R0 */ 1618 *reg_mask &= ~1; 1619 if (*reg_mask & 0x3f) { 1620 /* if backtracing was looking for registers R1-R5 1621 * they should have been found already. 1622 */ 1623 verbose(env, "BUG regs %x\n", *reg_mask); 1624 WARN_ONCE(1, "verifier backtracking bug"); 1625 return -EFAULT; 1626 } 1627 } else if (opcode == BPF_EXIT) { 1628 return -ENOTSUPP; 1629 } 1630 } else if (class == BPF_LD) { 1631 if (!(*reg_mask & dreg)) 1632 return 0; 1633 *reg_mask &= ~dreg; 1634 /* It's ld_imm64 or ld_abs or ld_ind. 1635 * For ld_imm64 no further tracking of precision 1636 * into parent is necessary 1637 */ 1638 if (mode == BPF_IND || mode == BPF_ABS) 1639 /* to be analyzed */ 1640 return -ENOTSUPP; 1641 } 1642 return 0; 1643 } 1644 1645 /* the scalar precision tracking algorithm: 1646 * . at the start all registers have precise=false. 1647 * . scalar ranges are tracked as normal through alu and jmp insns. 1648 * . once precise value of the scalar register is used in: 1649 * . ptr + scalar alu 1650 * . if (scalar cond K|scalar) 1651 * . helper_call(.., scalar, ...) where ARG_CONST is expected 1652 * backtrack through the verifier states and mark all registers and 1653 * stack slots with spilled constants that these scalar regisers 1654 * should be precise. 1655 * . during state pruning two registers (or spilled stack slots) 1656 * are equivalent if both are not precise. 1657 * 1658 * Note the verifier cannot simply walk register parentage chain, 1659 * since many different registers and stack slots could have been 1660 * used to compute single precise scalar. 1661 * 1662 * The approach of starting with precise=true for all registers and then 1663 * backtrack to mark a register as not precise when the verifier detects 1664 * that program doesn't care about specific value (e.g., when helper 1665 * takes register as ARG_ANYTHING parameter) is not safe. 1666 * 1667 * It's ok to walk single parentage chain of the verifier states. 1668 * It's possible that this backtracking will go all the way till 1st insn. 1669 * All other branches will be explored for needing precision later. 1670 * 1671 * The backtracking needs to deal with cases like: 1672 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 1673 * r9 -= r8 1674 * r5 = r9 1675 * if r5 > 0x79f goto pc+7 1676 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 1677 * r5 += 1 1678 * ... 1679 * call bpf_perf_event_output#25 1680 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 1681 * 1682 * and this case: 1683 * r6 = 1 1684 * call foo // uses callee's r6 inside to compute r0 1685 * r0 += r6 1686 * if r0 == 0 goto 1687 * 1688 * to track above reg_mask/stack_mask needs to be independent for each frame. 
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back into conservative marking.
 */
static void mark_all_scalars_precise(struct bpf_verifier_env *env,
				     struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	/* big hammer: mark all scalars precise in this path.
	 * pop_stack may still get !precise scalars.
	 */
	for (; st; st = st->parent)
		for (i = 0; i <= st->curframe; i++) {
			func = st->frame[i];
			for (j = 0; j < BPF_REG_FP; j++) {
				reg = &func->regs[j];
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
				if (func->stack[j].slot_type[0] != STACK_SPILL)
					continue;
				reg = &func->stack[j].spilled_ptr;
				if (reg->type != SCALAR_VALUE)
					continue;
				reg->precise = true;
			}
		}
}

static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
				  int spi)
{
	struct bpf_verifier_state *st = env->cur_state;
	int first_idx = st->first_insn_idx;
	int last_idx = env->insn_idx;
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
	bool skip_first = true;
	bool new_marks = false;
	int i, err;

	if (!env->allow_ptr_leaks)
		/* backtracking is root only for now */
		return 0;

	func = st->frame[st->curframe];
	if (regno >= 0) {
		reg = &func->regs[regno];
		if (reg->type != SCALAR_VALUE) {
			WARN_ONCE(1, "backtracking misuse");
			return -EFAULT;
		}
		if (!reg->precise)
			new_marks = true;
		else
			reg_mask = 0;
		reg->precise = true;
	}

	while (spi >= 0) {
		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
			stack_mask = 0;
			break;
		}
		reg = &func->stack[spi].spilled_ptr;
		if (reg->type != SCALAR_VALUE) {
			stack_mask = 0;
			break;
		}
		if (!reg->precise)
			new_marks = true;
		else
			stack_mask = 0;
		reg->precise = true;
		break;
	}

	if (!new_marks)
		return 0;
	if (!reg_mask && !stack_mask)
		return 0;
	for (;;) {
		DECLARE_BITMAP(mask, 64);
		u32 history = st->jmp_history_cnt;

		if (env->log.level & BPF_LOG_LEVEL)
			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
		for (i = last_idx;;) {
			if (skip_first) {
				err = 0;
				skip_first = false;
			} else {
				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
			}
			if (err == -ENOTSUPP) {
				mark_all_scalars_precise(env, st);
				return 0;
			} else if (err) {
				return err;
			}
			if (!reg_mask && !stack_mask)
				/* Found assignment(s) into tracked register in this state.
				 * Since this state is already marked, just return.
				 * Nothing to be tracked further in the parent state.
1804 */ 1805 return 0; 1806 if (i == first_idx) 1807 break; 1808 i = get_prev_insn_idx(st, i, &history); 1809 if (i >= env->prog->len) { 1810 /* This can happen if backtracking reached insn 0 1811 * and there are still reg_mask or stack_mask 1812 * to backtrack. 1813 * It means the backtracking missed the spot where 1814 * particular register was initialized with a constant. 1815 */ 1816 verbose(env, "BUG backtracking idx %d\n", i); 1817 WARN_ONCE(1, "verifier backtracking bug"); 1818 return -EFAULT; 1819 } 1820 } 1821 st = st->parent; 1822 if (!st) 1823 break; 1824 1825 new_marks = false; 1826 func = st->frame[st->curframe]; 1827 bitmap_from_u64(mask, reg_mask); 1828 for_each_set_bit(i, mask, 32) { 1829 reg = &func->regs[i]; 1830 if (reg->type != SCALAR_VALUE) { 1831 reg_mask &= ~(1u << i); 1832 continue; 1833 } 1834 if (!reg->precise) 1835 new_marks = true; 1836 reg->precise = true; 1837 } 1838 1839 bitmap_from_u64(mask, stack_mask); 1840 for_each_set_bit(i, mask, 64) { 1841 if (i >= func->allocated_stack / BPF_REG_SIZE) { 1842 /* the sequence of instructions: 1843 * 2: (bf) r3 = r10 1844 * 3: (7b) *(u64 *)(r3 -8) = r0 1845 * 4: (79) r4 = *(u64 *)(r10 -8) 1846 * doesn't contain jmps. It's backtracked 1847 * as a single block. 1848 * During backtracking insn 3 is not recognized as 1849 * stack access, so at the end of backtracking 1850 * stack slot fp-8 is still marked in stack_mask. 1851 * However the parent state may not have accessed 1852 * fp-8 and it's "unallocated" stack space. 1853 * In such case fallback to conservative. 1854 */ 1855 mark_all_scalars_precise(env, st); 1856 return 0; 1857 } 1858 1859 if (func->stack[i].slot_type[0] != STACK_SPILL) { 1860 stack_mask &= ~(1ull << i); 1861 continue; 1862 } 1863 reg = &func->stack[i].spilled_ptr; 1864 if (reg->type != SCALAR_VALUE) { 1865 stack_mask &= ~(1ull << i); 1866 continue; 1867 } 1868 if (!reg->precise) 1869 new_marks = true; 1870 reg->precise = true; 1871 } 1872 if (env->log.level & BPF_LOG_LEVEL) { 1873 print_verifier_state(env, func); 1874 verbose(env, "parent %s regs=%x stack=%llx marks\n", 1875 new_marks ? "didn't have" : "already had", 1876 reg_mask, stack_mask); 1877 } 1878 1879 if (!reg_mask && !stack_mask) 1880 break; 1881 if (!new_marks) 1882 break; 1883 1884 last_idx = st->last_insn_idx; 1885 first_idx = st->first_insn_idx; 1886 } 1887 return 0; 1888 } 1889 1890 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) 1891 { 1892 return __mark_chain_precision(env, regno, -1); 1893 } 1894 1895 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 1896 { 1897 return __mark_chain_precision(env, -1, spi); 1898 } 1899 1900 static bool is_spillable_regtype(enum bpf_reg_type type) 1901 { 1902 switch (type) { 1903 case PTR_TO_MAP_VALUE: 1904 case PTR_TO_MAP_VALUE_OR_NULL: 1905 case PTR_TO_STACK: 1906 case PTR_TO_CTX: 1907 case PTR_TO_PACKET: 1908 case PTR_TO_PACKET_META: 1909 case PTR_TO_PACKET_END: 1910 case PTR_TO_FLOW_KEYS: 1911 case CONST_PTR_TO_MAP: 1912 case PTR_TO_SOCKET: 1913 case PTR_TO_SOCKET_OR_NULL: 1914 case PTR_TO_SOCK_COMMON: 1915 case PTR_TO_SOCK_COMMON_OR_NULL: 1916 case PTR_TO_TCP_SOCK: 1917 case PTR_TO_TCP_SOCK_OR_NULL: 1918 case PTR_TO_XDP_SOCK: 1919 return true; 1920 default: 1921 return false; 1922 } 1923 } 1924 1925 /* Does this register contain a constant zero? 
*/ 1926 static bool register_is_null(struct bpf_reg_state *reg) 1927 { 1928 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 1929 } 1930 1931 static bool register_is_const(struct bpf_reg_state *reg) 1932 { 1933 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 1934 } 1935 1936 static void save_register_state(struct bpf_func_state *state, 1937 int spi, struct bpf_reg_state *reg) 1938 { 1939 int i; 1940 1941 state->stack[spi].spilled_ptr = *reg; 1942 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1943 1944 for (i = 0; i < BPF_REG_SIZE; i++) 1945 state->stack[spi].slot_type[i] = STACK_SPILL; 1946 } 1947 1948 /* check_stack_read/write functions track spill/fill of registers; 1949 * stack boundary and alignment are checked in check_mem_access() 1950 */ 1951 static int check_stack_write(struct bpf_verifier_env *env, 1952 struct bpf_func_state *state, /* func where register points to */ 1953 int off, int size, int value_regno, int insn_idx) 1954 { 1955 struct bpf_func_state *cur; /* state of the current function */ 1956 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 1957 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; 1958 struct bpf_reg_state *reg = NULL; 1959 1960 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), 1961 state->acquired_refs, true); 1962 if (err) 1963 return err; 1964 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 1965 * so it's aligned access and [off, off + size) are within stack limits 1966 */ 1967 if (!env->allow_ptr_leaks && 1968 state->stack[spi].slot_type[0] == STACK_SPILL && 1969 size != BPF_REG_SIZE) { 1970 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 1971 return -EACCES; 1972 } 1973 1974 cur = env->cur_state->frame[env->cur_state->curframe]; 1975 if (value_regno >= 0) 1976 reg = &cur->regs[value_regno]; 1977 1978 if (reg && size == BPF_REG_SIZE && register_is_const(reg) && 1979 !register_is_null(reg) && env->allow_ptr_leaks) { 1980 if (dst_reg != BPF_REG_FP) { 1981 /* The backtracking logic can only recognize explicit 1982 * stack slot addresses like [fp - 8]. Any other spill of 1983 * a scalar via a different register has to be conservative. 1984 * Backtrack from here and mark as precise all registers 1985 * that contributed to 'reg' being a constant.
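 *
 * A sketch of the case handled here, where the spill goes through
 * a copied stack pointer instead of r10 itself:
 *   r3 = r10
 *   r3 += -8
 *   *(u64 *)(r3 + 0) = r0    // dst_reg != BPF_REG_FP
 * backtrack_insn() would not recognize this store as a stack
 * access, hence the eager mark_chain_precision() call below.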
1986 */ 1987 err = mark_chain_precision(env, value_regno); 1988 if (err) 1989 return err; 1990 } 1991 save_register_state(state, spi, reg); 1992 } else if (reg && is_spillable_regtype(reg->type)) { 1993 /* register containing pointer is being spilled into stack */ 1994 if (size != BPF_REG_SIZE) { 1995 verbose_linfo(env, insn_idx, "; "); 1996 verbose(env, "invalid size of register spill\n"); 1997 return -EACCES; 1998 } 1999 2000 if (state != cur && reg->type == PTR_TO_STACK) { 2001 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 2002 return -EINVAL; 2003 } 2004 2005 if (!env->allow_ptr_leaks) { 2006 bool sanitize = false; 2007 2008 if (state->stack[spi].slot_type[0] == STACK_SPILL && 2009 register_is_const(&state->stack[spi].spilled_ptr)) 2010 sanitize = true; 2011 for (i = 0; i < BPF_REG_SIZE; i++) 2012 if (state->stack[spi].slot_type[i] == STACK_MISC) { 2013 sanitize = true; 2014 break; 2015 } 2016 if (sanitize) { 2017 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; 2018 int soff = (-spi - 1) * BPF_REG_SIZE; 2019 2020 /* detected reuse of integer stack slot with a pointer 2021 * which means either llvm is reusing stack slot or 2022 * an attacker is trying to exploit CVE-2018-3639 2023 * (speculative store bypass) 2024 * Have to sanitize that slot with preemptive 2025 * store of zero. 2026 */ 2027 if (*poff && *poff != soff) { 2028 /* disallow programs where single insn stores 2029 * into two different stack slots, since verifier 2030 * cannot sanitize them 2031 */ 2032 verbose(env, 2033 "insn %d cannot access two stack slots fp%d and fp%d", 2034 insn_idx, *poff, soff); 2035 return -EINVAL; 2036 } 2037 *poff = soff; 2038 } 2039 } 2040 save_register_state(state, spi, reg); 2041 } else { 2042 u8 type = STACK_MISC; 2043 2044 /* regular write of data into stack destroys any spilled ptr */ 2045 state->stack[spi].spilled_ptr.type = NOT_INIT; 2046 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 2047 if (state->stack[spi].slot_type[0] == STACK_SPILL) 2048 for (i = 0; i < BPF_REG_SIZE; i++) 2049 state->stack[spi].slot_type[i] = STACK_MISC; 2050 2051 /* only mark the slot as written if all 8 bytes were written 2052 * otherwise read propagation may incorrectly stop too soon 2053 * when stack slots are partially written. 2054 * This heuristic means that read propagation will be 2055 * conservative, since it will add reg_live_read marks 2056 * to stack slots all the way to first state when programs 2057 * writes+reads less than 8 bytes 2058 */ 2059 if (size == BPF_REG_SIZE) 2060 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2061 2062 /* when we zero initialize stack slots mark them as such */ 2063 if (reg && register_is_null(reg)) { 2064 /* backtracking doesn't work for STACK_ZERO yet. */ 2065 err = mark_chain_precision(env, value_regno); 2066 if (err) 2067 return err; 2068 type = STACK_ZERO; 2069 } 2070 2071 /* Mark slots affected by this stack write. 
*/ 2072 for (i = 0; i < size; i++) 2073 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 2074 type; 2075 } 2076 return 0; 2077 } 2078 2079 static int check_stack_read(struct bpf_verifier_env *env, 2080 struct bpf_func_state *reg_state /* func where register points to */, 2081 int off, int size, int value_regno) 2082 { 2083 struct bpf_verifier_state *vstate = env->cur_state; 2084 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2085 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 2086 struct bpf_reg_state *reg; 2087 u8 *stype; 2088 2089 if (reg_state->allocated_stack <= slot) { 2090 verbose(env, "invalid read from stack off %d+0 size %d\n", 2091 off, size); 2092 return -EACCES; 2093 } 2094 stype = reg_state->stack[spi].slot_type; 2095 reg = &reg_state->stack[spi].spilled_ptr; 2096 2097 if (stype[0] == STACK_SPILL) { 2098 if (size != BPF_REG_SIZE) { 2099 if (reg->type != SCALAR_VALUE) { 2100 verbose_linfo(env, env->insn_idx, "; "); 2101 verbose(env, "invalid size of register fill\n"); 2102 return -EACCES; 2103 } 2104 if (value_regno >= 0) { 2105 mark_reg_unknown(env, state->regs, value_regno); 2106 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2107 } 2108 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2109 return 0; 2110 } 2111 for (i = 1; i < BPF_REG_SIZE; i++) { 2112 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { 2113 verbose(env, "corrupted spill memory\n"); 2114 return -EACCES; 2115 } 2116 } 2117 2118 if (value_regno >= 0) { 2119 /* restore register state from stack */ 2120 state->regs[value_regno] = *reg; 2121 /* mark reg as written since spilled pointer state likely 2122 * has its liveness marks cleared by is_state_visited() 2123 * which resets stack/reg liveness for state transitions 2124 */ 2125 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2126 } 2127 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2128 } else { 2129 int zeros = 0; 2130 2131 for (i = 0; i < size; i++) { 2132 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) 2133 continue; 2134 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { 2135 zeros++; 2136 continue; 2137 } 2138 verbose(env, "invalid read from stack off %d+%d size %d\n", 2139 off, i, size); 2140 return -EACCES; 2141 } 2142 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 2143 if (value_regno >= 0) { 2144 if (zeros == size) { 2145 /* any size read into register is zero extended, 2146 * so the whole register == const_zero 2147 */ 2148 __mark_reg_const_zero(&state->regs[value_regno]); 2149 /* backtracking doesn't support STACK_ZERO yet, 2150 * so mark it precise here, so that later 2151 * backtracking can stop at this point. 2152 * Backtracking may not need this if this register 2153 * doesn't participate in pointer adjustment. 2154 * Forward propagation of precise flag is not 2155 * necessary either. This mark is only to stop 2156 * backtracking. Any register that contributed 2157 * to const 0 was marked precise before spill. 2158 */ 2159 state->regs[value_regno].precise = true; 2160 } else { 2161 /* have read misc data from the stack */ 2162 mark_reg_unknown(env, state->regs, value_regno); 2163 } 2164 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2165 } 2166 } 2167 return 0; 2168 } 2169 2170 static int check_stack_access(struct bpf_verifier_env *env, 2171 const struct bpf_reg_state *reg, 2172 int off, int size) 2173 { 2174 /* Stack accesses must be at a fixed offset, so that we 2175 * can determine what type of data was returned. See 2176 * check_stack_read().
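 *
 * A (hypothetical) sequence this rejects:
 *   r2 = r10
 *   r2 += r1                 // r1 is a scalar without constant bounds
 *   r3 = *(u64 *)(r2 - 8)    // var_off not constant -> -EACCES below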
2177 */ 2178 if (!tnum_is_const(reg->var_off)) { 2179 char tn_buf[48]; 2180 2181 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2182 verbose(env, "variable stack access var_off=%s off=%d size=%d\n", 2183 tn_buf, off, size); 2184 return -EACCES; 2185 } 2186 2187 if (off >= 0 || off < -MAX_BPF_STACK) { 2188 verbose(env, "invalid stack off=%d size=%d\n", off, size); 2189 return -EACCES; 2190 } 2191 2192 return 0; 2193 } 2194 2195 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 2196 int off, int size, enum bpf_access_type type) 2197 { 2198 struct bpf_reg_state *regs = cur_regs(env); 2199 struct bpf_map *map = regs[regno].map_ptr; 2200 u32 cap = bpf_map_flags_to_cap(map); 2201 2202 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 2203 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 2204 map->value_size, off, size); 2205 return -EACCES; 2206 } 2207 2208 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 2209 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 2210 map->value_size, off, size); 2211 return -EACCES; 2212 } 2213 2214 return 0; 2215 } 2216 2217 /* check read/write into map element returned by bpf_map_lookup_elem() */ 2218 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 2219 int size, bool zero_size_allowed) 2220 { 2221 struct bpf_reg_state *regs = cur_regs(env); 2222 struct bpf_map *map = regs[regno].map_ptr; 2223 2224 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 2225 off + size > map->value_size) { 2226 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 2227 map->value_size, off, size); 2228 return -EACCES; 2229 } 2230 return 0; 2231 } 2232 2233 /* check read/write into a map element with possible variable offset */ 2234 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 2235 int off, int size, bool zero_size_allowed) 2236 { 2237 struct bpf_verifier_state *vstate = env->cur_state; 2238 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2239 struct bpf_reg_state *reg = &state->regs[regno]; 2240 int err; 2241 2242 /* We may have adjusted the register to this map value, so we 2243 * need to try adding each of min_value and max_value to off 2244 * to make sure our theoretical access will be safe. 2245 */ 2246 if (env->log.level & BPF_LOG_LEVEL) 2247 print_verifier_state(env, state); 2248 2249 /* The minimum value is only important with signed 2250 * comparisons where we can't assume the floor of a 2251 * value is 0. If we are using signed variables for our 2252 * index'es we need to make sure that whatever we use 2253 * will have a set floor within our range. 2254 */ 2255 if (reg->smin_value < 0 && 2256 (reg->smin_value == S64_MIN || 2257 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 2258 reg->smin_value + off < 0)) { 2259 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2260 regno); 2261 return -EACCES; 2262 } 2263 err = __check_map_access(env, regno, reg->smin_value + off, size, 2264 zero_size_allowed); 2265 if (err) { 2266 verbose(env, "R%d min value is outside of the array range\n", 2267 regno); 2268 return err; 2269 } 2270 2271 /* If we haven't set a max value then we need to bail since we can't be 2272 * sure we won't do bad things. 2273 * If reg->umax_value + off could overflow, treat that as unbounded too. 
2274 */ 2275 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 2276 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", 2277 regno); 2278 return -EACCES; 2279 } 2280 err = __check_map_access(env, regno, reg->umax_value + off, size, 2281 zero_size_allowed); 2282 if (err) 2283 verbose(env, "R%d max value is outside of the array range\n", 2284 regno); 2285 2286 if (map_value_has_spin_lock(reg->map_ptr)) { 2287 u32 lock = reg->map_ptr->spin_lock_off; 2288 2289 /* if any part of struct bpf_spin_lock can be touched by 2290 * load/store reject this program. 2291 * To check that [x1, x2) overlaps with [y1, y2) 2292 * it is sufficient to check x1 < y2 && y1 < x2. 2293 */ 2294 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 2295 lock < reg->umax_value + off + size) { 2296 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 2297 return -EACCES; 2298 } 2299 } 2300 return err; 2301 } 2302 2303 #define MAX_PACKET_OFF 0xffff 2304 2305 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 2306 const struct bpf_call_arg_meta *meta, 2307 enum bpf_access_type t) 2308 { 2309 switch (env->prog->type) { 2310 /* Program types only with direct read access go here! */ 2311 case BPF_PROG_TYPE_LWT_IN: 2312 case BPF_PROG_TYPE_LWT_OUT: 2313 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2314 case BPF_PROG_TYPE_SK_REUSEPORT: 2315 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2316 case BPF_PROG_TYPE_CGROUP_SKB: 2317 if (t == BPF_WRITE) 2318 return false; 2319 /* fallthrough */ 2320 2321 /* Program types with direct read + write access go here! */ 2322 case BPF_PROG_TYPE_SCHED_CLS: 2323 case BPF_PROG_TYPE_SCHED_ACT: 2324 case BPF_PROG_TYPE_XDP: 2325 case BPF_PROG_TYPE_LWT_XMIT: 2326 case BPF_PROG_TYPE_SK_SKB: 2327 case BPF_PROG_TYPE_SK_MSG: 2328 if (meta) 2329 return meta->pkt_access; 2330 2331 env->seen_direct_write = true; 2332 return true; 2333 2334 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2335 if (t == BPF_WRITE) 2336 env->seen_direct_write = true; 2337 2338 return true; 2339 2340 default: 2341 return false; 2342 } 2343 } 2344 2345 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, 2346 int off, int size, bool zero_size_allowed) 2347 { 2348 struct bpf_reg_state *regs = cur_regs(env); 2349 struct bpf_reg_state *reg = ®s[regno]; 2350 2351 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 2352 (u64)off + size > reg->range) { 2353 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 2354 off, size, regno, reg->id, reg->off, reg->range); 2355 return -EACCES; 2356 } 2357 return 0; 2358 } 2359 2360 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 2361 int size, bool zero_size_allowed) 2362 { 2363 struct bpf_reg_state *regs = cur_regs(env); 2364 struct bpf_reg_state *reg = ®s[regno]; 2365 int err; 2366 2367 /* We may have added a variable offset to the packet pointer; but any 2368 * reg->range we have comes after that. We are only checking the fixed 2369 * offset. 2370 */ 2371 2372 /* We don't allow negative numbers, because we aren't tracking enough 2373 * detail to prove they're safe. 
2374 */ 2375 if (reg->smin_value < 0) { 2376 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2377 regno); 2378 return -EACCES; 2379 } 2380 err = __check_packet_access(env, regno, off, size, zero_size_allowed); 2381 if (err) { 2382 verbose(env, "R%d offset is outside of the packet\n", regno); 2383 return err; 2384 } 2385 2386 /* __check_packet_access has made sure "off + size - 1" is within u16. 2387 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, 2388 * otherwise find_good_pkt_pointers would have refused to set range info 2389 * that __check_packet_access would have rejected this pkt access. 2390 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 2391 */ 2392 env->prog->aux->max_pkt_offset = 2393 max_t(u32, env->prog->aux->max_pkt_offset, 2394 off + reg->umax_value + size - 1); 2395 2396 return err; 2397 } 2398 2399 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 2400 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 2401 enum bpf_access_type t, enum bpf_reg_type *reg_type, 2402 u32 *btf_id) 2403 { 2404 struct bpf_insn_access_aux info = { 2405 .reg_type = *reg_type, 2406 .log = &env->log, 2407 }; 2408 2409 if (env->ops->is_valid_access && 2410 env->ops->is_valid_access(off, size, t, env->prog, &info)) { 2411 /* A non zero info.ctx_field_size indicates that this field is a 2412 * candidate for later verifier transformation to load the whole 2413 * field and then apply a mask when accessed with a narrower 2414 * access than actual ctx access size. A zero info.ctx_field_size 2415 * will only allow for whole field access and rejects any other 2416 * type of narrower access. 2417 */ 2418 *reg_type = info.reg_type; 2419 2420 if (*reg_type == PTR_TO_BTF_ID) 2421 *btf_id = info.btf_id; 2422 else 2423 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 2424 /* remember the offset of last byte accessed in ctx */ 2425 if (env->prog->aux->max_ctx_offset < off + size) 2426 env->prog->aux->max_ctx_offset = off + size; 2427 return 0; 2428 } 2429 2430 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 2431 return -EACCES; 2432 } 2433 2434 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 2435 int size) 2436 { 2437 if (size < 0 || off < 0 || 2438 (u64)off + size > sizeof(struct bpf_flow_keys)) { 2439 verbose(env, "invalid access to flow keys off=%d size=%d\n", 2440 off, size); 2441 return -EACCES; 2442 } 2443 return 0; 2444 } 2445 2446 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 2447 u32 regno, int off, int size, 2448 enum bpf_access_type t) 2449 { 2450 struct bpf_reg_state *regs = cur_regs(env); 2451 struct bpf_reg_state *reg = ®s[regno]; 2452 struct bpf_insn_access_aux info = {}; 2453 bool valid; 2454 2455 if (reg->smin_value < 0) { 2456 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 2457 regno); 2458 return -EACCES; 2459 } 2460 2461 switch (reg->type) { 2462 case PTR_TO_SOCK_COMMON: 2463 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 2464 break; 2465 case PTR_TO_SOCKET: 2466 valid = bpf_sock_is_valid_access(off, size, t, &info); 2467 break; 2468 case PTR_TO_TCP_SOCK: 2469 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 2470 break; 2471 case PTR_TO_XDP_SOCK: 2472 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 2473 break; 2474 default: 2475 valid = false; 2476 } 2477 
2478 2479 if (valid) { 2480 env->insn_aux_data[insn_idx].ctx_field_size = 2481 info.ctx_field_size; 2482 return 0; 2483 } 2484 2485 verbose(env, "R%d invalid %s access off=%d size=%d\n", 2486 regno, reg_type_str[reg->type], off, size); 2487 2488 return -EACCES; 2489 } 2490 2491 static bool __is_pointer_value(bool allow_ptr_leaks, 2492 const struct bpf_reg_state *reg) 2493 { 2494 if (allow_ptr_leaks) 2495 return false; 2496 2497 return reg->type != SCALAR_VALUE; 2498 } 2499 2500 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 2501 { 2502 return cur_regs(env) + regno; 2503 } 2504 2505 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 2506 { 2507 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 2508 } 2509 2510 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 2511 { 2512 const struct bpf_reg_state *reg = reg_state(env, regno); 2513 2514 return reg->type == PTR_TO_CTX; 2515 } 2516 2517 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 2518 { 2519 const struct bpf_reg_state *reg = reg_state(env, regno); 2520 2521 return type_is_sk_pointer(reg->type); 2522 } 2523 2524 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 2525 { 2526 const struct bpf_reg_state *reg = reg_state(env, regno); 2527 2528 return type_is_pkt_pointer(reg->type); 2529 } 2530 2531 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 2532 { 2533 const struct bpf_reg_state *reg = reg_state(env, regno); 2534 2535 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ 2536 return reg->type == PTR_TO_FLOW_KEYS; 2537 } 2538 2539 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 2540 const struct bpf_reg_state *reg, 2541 int off, int size, bool strict) 2542 { 2543 struct tnum reg_off; 2544 int ip_align; 2545 2546 /* Byte size accesses are always allowed. */ 2547 if (!strict || size == 1) 2548 return 0; 2549 2550 /* For platforms that do not have a Kconfig enabling 2551 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 2552 * NET_IP_ALIGN is universally set to '2'. And on platforms 2553 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 2554 * to this code only in strict mode where we want to emulate 2555 * the NET_IP_ALIGN==2 checking. Therefore use an 2556 * unconditional IP align value of '2'. 2557 */ 2558 ip_align = 2; 2559 2560 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 2561 if (!tnum_is_aligned(reg_off, size)) { 2562 char tn_buf[48]; 2563 2564 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2565 verbose(env, 2566 "misaligned packet access off %d+%s+%d+%d size %d\n", 2567 ip_align, tn_buf, reg->off, off, size); 2568 return -EACCES; 2569 } 2570 2571 return 0; 2572 } 2573 2574 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 2575 const struct bpf_reg_state *reg, 2576 const char *pointer_desc, 2577 int off, int size, bool strict) 2578 { 2579 struct tnum reg_off; 2580 2581 /* Byte size accesses are always allowed. 
*/ 2582 if (!strict || size == 1) 2583 return 0; 2584 2585 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 2586 if (!tnum_is_aligned(reg_off, size)) { 2587 char tn_buf[48]; 2588 2589 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2590 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 2591 pointer_desc, tn_buf, reg->off, off, size); 2592 return -EACCES; 2593 } 2594 2595 return 0; 2596 } 2597 2598 static int check_ptr_alignment(struct bpf_verifier_env *env, 2599 const struct bpf_reg_state *reg, int off, 2600 int size, bool strict_alignment_once) 2601 { 2602 bool strict = env->strict_alignment || strict_alignment_once; 2603 const char *pointer_desc = ""; 2604 2605 switch (reg->type) { 2606 case PTR_TO_PACKET: 2607 case PTR_TO_PACKET_META: 2608 /* Special case, because of NET_IP_ALIGN. Given metadata sits 2609 * right in front, treat it the very same way. 2610 */ 2611 return check_pkt_ptr_alignment(env, reg, off, size, strict); 2612 case PTR_TO_FLOW_KEYS: 2613 pointer_desc = "flow keys "; 2614 break; 2615 case PTR_TO_MAP_VALUE: 2616 pointer_desc = "value "; 2617 break; 2618 case PTR_TO_CTX: 2619 pointer_desc = "context "; 2620 break; 2621 case PTR_TO_STACK: 2622 pointer_desc = "stack "; 2623 /* The stack spill tracking logic in check_stack_write() 2624 * and check_stack_read() relies on stack accesses being 2625 * aligned. 2626 */ 2627 strict = true; 2628 break; 2629 case PTR_TO_SOCKET: 2630 pointer_desc = "sock "; 2631 break; 2632 case PTR_TO_SOCK_COMMON: 2633 pointer_desc = "sock_common "; 2634 break; 2635 case PTR_TO_TCP_SOCK: 2636 pointer_desc = "tcp_sock "; 2637 break; 2638 case PTR_TO_XDP_SOCK: 2639 pointer_desc = "xdp_sock "; 2640 break; 2641 default: 2642 break; 2643 } 2644 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 2645 strict); 2646 } 2647 2648 static int update_stack_depth(struct bpf_verifier_env *env, 2649 const struct bpf_func_state *func, 2650 int off) 2651 { 2652 u16 stack = env->subprog_info[func->subprogno].stack_depth; 2653 2654 if (stack >= -off) 2655 return 0; 2656 2657 /* update known max for given subprogram */ 2658 env->subprog_info[func->subprogno].stack_depth = -off; 2659 return 0; 2660 } 2661 2662 /* starting from main bpf function walk all instructions of the function 2663 * and recursively walk all callees that given function can call. 2664 * Ignore jump and exit insns. 2665 * Since recursion is prevented by check_cfg() this algorithm 2666 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 2667 */ 2668 static int check_max_stack_depth(struct bpf_verifier_env *env) 2669 { 2670 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 2671 struct bpf_subprog_info *subprog = env->subprog_info; 2672 struct bpf_insn *insn = env->prog->insnsi; 2673 int ret_insn[MAX_CALL_FRAMES]; 2674 int ret_prog[MAX_CALL_FRAMES]; 2675 2676 process_func: 2677 /* round up to 32-bytes, since this is granularity 2678 * of interpreter stack size 2679 */ 2680 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2681 if (depth > MAX_BPF_STACK) { 2682 verbose(env, "combined stack size of %d calls is %d. 
Too large\n", 2683 frame + 1, depth); 2684 return -EACCES; 2685 } 2686 continue_func: 2687 subprog_end = subprog[idx + 1].start; 2688 for (; i < subprog_end; i++) { 2689 if (insn[i].code != (BPF_JMP | BPF_CALL)) 2690 continue; 2691 if (insn[i].src_reg != BPF_PSEUDO_CALL) 2692 continue; 2693 /* remember insn and function to return to */ 2694 ret_insn[frame] = i + 1; 2695 ret_prog[frame] = idx; 2696 2697 /* find the callee */ 2698 i = i + insn[i].imm + 1; 2699 idx = find_subprog(env, i); 2700 if (idx < 0) { 2701 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2702 i); 2703 return -EFAULT; 2704 } 2705 frame++; 2706 if (frame >= MAX_CALL_FRAMES) { 2707 verbose(env, "the call stack of %d frames is too deep !\n", 2708 frame); 2709 return -E2BIG; 2710 } 2711 goto process_func; 2712 } 2713 /* end of for() loop means the last insn of the 'subprog' 2714 * was reached. Doesn't matter whether it was JA or EXIT 2715 */ 2716 if (frame == 0) 2717 return 0; 2718 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 2719 frame--; 2720 i = ret_insn[frame]; 2721 idx = ret_prog[frame]; 2722 goto continue_func; 2723 } 2724 2725 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 2726 static int get_callee_stack_depth(struct bpf_verifier_env *env, 2727 const struct bpf_insn *insn, int idx) 2728 { 2729 int start = idx + insn->imm + 1, subprog; 2730 2731 subprog = find_subprog(env, start); 2732 if (subprog < 0) { 2733 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 2734 start); 2735 return -EFAULT; 2736 } 2737 return env->subprog_info[subprog].stack_depth; 2738 } 2739 #endif 2740 2741 static int check_ctx_reg(struct bpf_verifier_env *env, 2742 const struct bpf_reg_state *reg, int regno) 2743 { 2744 /* Access to ctx or passing it to a helper is only allowed in 2745 * its original, unmodified form. 
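 *
 * E.g. (sketch):
 *   r2 = r1                  // r1 is ctx, plain copies are fine
 *   r2 += 8                  // now reg->off != 0
 *   r0 = *(u64 *)(r2 + 0)    // rejected below as a modified ctx pointer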
2746 */ 2747 2748 if (reg->off) { 2749 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", 2750 regno, reg->off); 2751 return -EACCES; 2752 } 2753 2754 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2755 char tn_buf[48]; 2756 2757 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2758 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); 2759 return -EACCES; 2760 } 2761 2762 return 0; 2763 } 2764 2765 static int check_tp_buffer_access(struct bpf_verifier_env *env, 2766 const struct bpf_reg_state *reg, 2767 int regno, int off, int size) 2768 { 2769 if (off < 0) { 2770 verbose(env, 2771 "R%d invalid tracepoint buffer access: off=%d, size=%d", 2772 regno, off, size); 2773 return -EACCES; 2774 } 2775 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2776 char tn_buf[48]; 2777 2778 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2779 verbose(env, 2780 "R%d invalid variable buffer offset: off=%d, var_off=%s", 2781 regno, off, tn_buf); 2782 return -EACCES; 2783 } 2784 if (off + size > env->prog->aux->max_tp_access) 2785 env->prog->aux->max_tp_access = off + size; 2786 2787 return 0; 2788 } 2789 2790 2791 /* truncate register to smaller size (in bytes) 2792 * must be called with size < BPF_REG_SIZE 2793 */ 2794 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 2795 { 2796 u64 mask; 2797 2798 /* clear high bits in bit representation */ 2799 reg->var_off = tnum_cast(reg->var_off, size); 2800 2801 /* fix arithmetic bounds */ 2802 mask = ((u64)1 << (size * 8)) - 1; 2803 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 2804 reg->umin_value &= mask; 2805 reg->umax_value &= mask; 2806 } else { 2807 reg->umin_value = 0; 2808 reg->umax_value = mask; 2809 } 2810 reg->smin_value = reg->umin_value; 2811 reg->smax_value = reg->umax_value; 2812 } 2813 2814 static bool bpf_map_is_rdonly(const struct bpf_map *map) 2815 { 2816 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; 2817 } 2818 2819 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 2820 { 2821 void *ptr; 2822 u64 addr; 2823 int err; 2824 2825 err = map->ops->map_direct_value_addr(map, &addr, off); 2826 if (err) 2827 return err; 2828 ptr = (void *)(long)addr + off; 2829 2830 switch (size) { 2831 case sizeof(u8): 2832 *val = (u64)*(u8 *)ptr; 2833 break; 2834 case sizeof(u16): 2835 *val = (u64)*(u16 *)ptr; 2836 break; 2837 case sizeof(u32): 2838 *val = (u64)*(u32 *)ptr; 2839 break; 2840 case sizeof(u64): 2841 *val = *(u64 *)ptr; 2842 break; 2843 default: 2844 return -EINVAL; 2845 } 2846 return 0; 2847 } 2848 2849 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 2850 struct bpf_reg_state *regs, 2851 int regno, int off, int size, 2852 enum bpf_access_type atype, 2853 int value_regno) 2854 { 2855 struct bpf_reg_state *reg = regs + regno; 2856 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id); 2857 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off); 2858 u32 btf_id; 2859 int ret; 2860 2861 if (atype != BPF_READ) { 2862 verbose(env, "only read is supported\n"); 2863 return -EACCES; 2864 } 2865 2866 if (off < 0) { 2867 verbose(env, 2868 "R%d is ptr_%s invalid negative access: off=%d\n", 2869 regno, tname, off); 2870 return -EACCES; 2871 } 2872 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 2873 char tn_buf[48]; 2874 2875 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2876 verbose(env, 2877 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 2878 regno, tname, off, 
tn_buf); 2879 return -EACCES; 2880 } 2881 2882 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); 2883 if (ret < 0) 2884 return ret; 2885 2886 if (ret == SCALAR_VALUE) { 2887 mark_reg_unknown(env, regs, value_regno); 2888 return 0; 2889 } 2890 mark_reg_known_zero(env, regs, value_regno); 2891 regs[value_regno].type = PTR_TO_BTF_ID; 2892 regs[value_regno].btf_id = btf_id; 2893 return 0; 2894 } 2895 2896 /* check whether memory at (regno + off) is accessible for t = (read | write) 2897 * if t==write, value_regno is the register whose value is stored into memory 2898 * if t==read, value_regno is the register that will receive the value from memory 2899 * if t==write && value_regno==-1, some unknown value is stored into memory 2900 * if t==read && value_regno==-1, don't care what we read from memory 2901 */ 2902 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 2903 int off, int bpf_size, enum bpf_access_type t, 2904 int value_regno, bool strict_alignment_once) 2905 { 2906 struct bpf_reg_state *regs = cur_regs(env); 2907 struct bpf_reg_state *reg = regs + regno; 2908 struct bpf_func_state *state; 2909 int size, err = 0; 2910 2911 size = bpf_size_to_bytes(bpf_size); 2912 if (size < 0) 2913 return size; 2914 2915 /* alignment checks will add in reg->off themselves */ 2916 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 2917 if (err) 2918 return err; 2919 2920 /* for access checks, reg->off is just part of off */ 2921 off += reg->off; 2922 2923 if (reg->type == PTR_TO_MAP_VALUE) { 2924 if (t == BPF_WRITE && value_regno >= 0 && 2925 is_pointer_value(env, value_regno)) { 2926 verbose(env, "R%d leaks addr into map\n", value_regno); 2927 return -EACCES; 2928 } 2929 err = check_map_access_type(env, regno, off, size, t); 2930 if (err) 2931 return err; 2932 err = check_map_access(env, regno, off, size, false); 2933 if (!err && t == BPF_READ && value_regno >= 0) { 2934 struct bpf_map *map = reg->map_ptr; 2935 2936 /* if map is read-only, track its contents as scalars */ 2937 if (tnum_is_const(reg->var_off) && 2938 bpf_map_is_rdonly(map) && 2939 map->ops->map_direct_value_addr) { 2940 int map_off = off + reg->var_off.value; 2941 u64 val = 0; 2942 2943 err = bpf_map_direct_read(map, map_off, size, 2944 &val); 2945 if (err) 2946 return err; 2947 2948 regs[value_regno].type = SCALAR_VALUE; 2949 __mark_reg_known(&regs[value_regno], val); 2950 } else { 2951 mark_reg_unknown(env, regs, value_regno); 2952 } 2953 } 2954 } else if (reg->type == PTR_TO_CTX) { 2955 enum bpf_reg_type reg_type = SCALAR_VALUE; 2956 u32 btf_id = 0; 2957 2958 if (t == BPF_WRITE && value_regno >= 0 && 2959 is_pointer_value(env, value_regno)) { 2960 verbose(env, "R%d leaks addr into ctx\n", value_regno); 2961 return -EACCES; 2962 } 2963 2964 err = check_ctx_reg(env, reg, regno); 2965 if (err < 0) 2966 return err; 2967 2968 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id); 2969 if (err) 2970 verbose_linfo(env, insn_idx, "; "); 2971 if (!err && t == BPF_READ && value_regno >= 0) { 2972 /* ctx access returns either a scalar, or a 2973 * PTR_TO_PACKET[_META,_END]. In the latter 2974 * case, we know the offset is zero.
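 * E.g. loading __sk_buff->len yields an unknown SCALAR_VALUE, while
 * loading __sk_buff->data yields a PTR_TO_PACKET whose fixed and
 * variable offsets are known to be zero.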
2975 */ 2976 if (reg_type == SCALAR_VALUE) { 2977 mark_reg_unknown(env, regs, value_regno); 2978 } else { 2979 mark_reg_known_zero(env, regs, 2980 value_regno); 2981 if (reg_type_may_be_null(reg_type)) 2982 regs[value_regno].id = ++env->id_gen; 2983 /* A load of ctx field could have different 2984 * actual load size with the one encoded in the 2985 * insn. When the dst is PTR, it is for sure not 2986 * a sub-register. 2987 */ 2988 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 2989 if (reg_type == PTR_TO_BTF_ID) 2990 regs[value_regno].btf_id = btf_id; 2991 } 2992 regs[value_regno].type = reg_type; 2993 } 2994 2995 } else if (reg->type == PTR_TO_STACK) { 2996 off += reg->var_off.value; 2997 err = check_stack_access(env, reg, off, size); 2998 if (err) 2999 return err; 3000 3001 state = func(env, reg); 3002 err = update_stack_depth(env, state, off); 3003 if (err) 3004 return err; 3005 3006 if (t == BPF_WRITE) 3007 err = check_stack_write(env, state, off, size, 3008 value_regno, insn_idx); 3009 else 3010 err = check_stack_read(env, state, off, size, 3011 value_regno); 3012 } else if (reg_is_pkt_pointer(reg)) { 3013 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 3014 verbose(env, "cannot write into packet\n"); 3015 return -EACCES; 3016 } 3017 if (t == BPF_WRITE && value_regno >= 0 && 3018 is_pointer_value(env, value_regno)) { 3019 verbose(env, "R%d leaks addr into packet\n", 3020 value_regno); 3021 return -EACCES; 3022 } 3023 err = check_packet_access(env, regno, off, size, false); 3024 if (!err && t == BPF_READ && value_regno >= 0) 3025 mark_reg_unknown(env, regs, value_regno); 3026 } else if (reg->type == PTR_TO_FLOW_KEYS) { 3027 if (t == BPF_WRITE && value_regno >= 0 && 3028 is_pointer_value(env, value_regno)) { 3029 verbose(env, "R%d leaks addr into flow keys\n", 3030 value_regno); 3031 return -EACCES; 3032 } 3033 3034 err = check_flow_keys_access(env, off, size); 3035 if (!err && t == BPF_READ && value_regno >= 0) 3036 mark_reg_unknown(env, regs, value_regno); 3037 } else if (type_is_sk_pointer(reg->type)) { 3038 if (t == BPF_WRITE) { 3039 verbose(env, "R%d cannot write into %s\n", 3040 regno, reg_type_str[reg->type]); 3041 return -EACCES; 3042 } 3043 err = check_sock_access(env, insn_idx, regno, off, size, t); 3044 if (!err && value_regno >= 0) 3045 mark_reg_unknown(env, regs, value_regno); 3046 } else if (reg->type == PTR_TO_TP_BUFFER) { 3047 err = check_tp_buffer_access(env, reg, regno, off, size); 3048 if (!err && t == BPF_READ && value_regno >= 0) 3049 mark_reg_unknown(env, regs, value_regno); 3050 } else if (reg->type == PTR_TO_BTF_ID) { 3051 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 3052 value_regno); 3053 } else { 3054 verbose(env, "R%d invalid mem access '%s'\n", regno, 3055 reg_type_str[reg->type]); 3056 return -EACCES; 3057 } 3058 3059 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 3060 regs[value_regno].type == SCALAR_VALUE) { 3061 /* b/h/w load zero-extends, mark upper bits as known 0 */ 3062 coerce_reg_to_size(®s[value_regno], size); 3063 } 3064 return err; 3065 } 3066 3067 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 3068 { 3069 int err; 3070 3071 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 3072 insn->imm != 0) { 3073 verbose(env, "BPF_XADD uses reserved fields\n"); 3074 return -EINVAL; 3075 } 3076 3077 /* check src1 operand */ 3078 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3079 if (err) 3080 return err; 3081 3082 /* check src2 
operand */ 3083 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3084 if (err) 3085 return err; 3086 3087 if (is_pointer_value(env, insn->src_reg)) { 3088 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 3089 return -EACCES; 3090 } 3091 3092 if (is_ctx_reg(env, insn->dst_reg) || 3093 is_pkt_reg(env, insn->dst_reg) || 3094 is_flow_key_reg(env, insn->dst_reg) || 3095 is_sk_reg(env, insn->dst_reg)) { 3096 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", 3097 insn->dst_reg, 3098 reg_type_str[reg_state(env, insn->dst_reg)->type]); 3099 return -EACCES; 3100 } 3101 3102 /* check whether atomic_add can read the memory */ 3103 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3104 BPF_SIZE(insn->code), BPF_READ, -1, true); 3105 if (err) 3106 return err; 3107 3108 /* check whether atomic_add can write into the same memory */ 3109 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3110 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 3111 } 3112 3113 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, 3114 int off, int access_size, 3115 bool zero_size_allowed) 3116 { 3117 struct bpf_reg_state *reg = reg_state(env, regno); 3118 3119 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 3120 access_size < 0 || (access_size == 0 && !zero_size_allowed)) { 3121 if (tnum_is_const(reg->var_off)) { 3122 verbose(env, "invalid stack type R%d off=%d access_size=%d\n", 3123 regno, off, access_size); 3124 } else { 3125 char tn_buf[48]; 3126 3127 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3128 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", 3129 regno, tn_buf, access_size); 3130 } 3131 return -EACCES; 3132 } 3133 return 0; 3134 } 3135 3136 /* when register 'regno' is passed into function that will read 'access_size' 3137 * bytes from that pointer, make sure that it's within stack boundary 3138 * and all elements of stack are initialized. 3139 * Unlike most pointer bounds-checking functions, this one doesn't take an 3140 * 'off' argument, so it has to add in reg->off itself. 3141 */ 3142 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 3143 int access_size, bool zero_size_allowed, 3144 struct bpf_call_arg_meta *meta) 3145 { 3146 struct bpf_reg_state *reg = reg_state(env, regno); 3147 struct bpf_func_state *state = func(env, reg); 3148 int err, min_off, max_off, i, j, slot, spi; 3149 3150 if (reg->type != PTR_TO_STACK) { 3151 /* Allow zero-byte read from NULL, regardless of pointer type */ 3152 if (zero_size_allowed && access_size == 0 && 3153 register_is_null(reg)) 3154 return 0; 3155 3156 verbose(env, "R%d type=%s expected=%s\n", regno, 3157 reg_type_str[reg->type], 3158 reg_type_str[PTR_TO_STACK]); 3159 return -EACCES; 3160 } 3161 3162 if (tnum_is_const(reg->var_off)) { 3163 min_off = max_off = reg->var_off.value + reg->off; 3164 err = __check_stack_boundary(env, regno, min_off, access_size, 3165 zero_size_allowed); 3166 if (err) 3167 return err; 3168 } else { 3169 /* Variable offset is prohibited for unprivileged mode for 3170 * simplicity since it requires corresponding support in 3171 * Spectre masking for stack ALU. 3172 * See also retrieve_ptr_limit(). 
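 * A sketch of what this rejects for unprivileged programs:
 *   r2 = r10
 *   r2 += r7                 // r7 bounded to, say, [-16, -8]
 *   // pass r2 to a helper as a buffer
 * Privileged programs may do this, provided every byte in
 * [min_off, max_off + access_size) is found initialized below.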
3173 */ 3174 if (!env->allow_ptr_leaks) { 3175 char tn_buf[48]; 3176 3177 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3178 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", 3179 regno, tn_buf); 3180 return -EACCES; 3181 } 3182 /* Only initialized buffer on stack is allowed to be accessed 3183 * with variable offset. With uninitialized buffer it's hard to 3184 * guarantee that whole memory is marked as initialized on 3185 * helper return since specific bounds are unknown what may 3186 * cause uninitialized stack leaking. 3187 */ 3188 if (meta && meta->raw_mode) 3189 meta = NULL; 3190 3191 if (reg->smax_value >= BPF_MAX_VAR_OFF || 3192 reg->smax_value <= -BPF_MAX_VAR_OFF) { 3193 verbose(env, "R%d unbounded indirect variable offset stack access\n", 3194 regno); 3195 return -EACCES; 3196 } 3197 min_off = reg->smin_value + reg->off; 3198 max_off = reg->smax_value + reg->off; 3199 err = __check_stack_boundary(env, regno, min_off, access_size, 3200 zero_size_allowed); 3201 if (err) { 3202 verbose(env, "R%d min value is outside of stack bound\n", 3203 regno); 3204 return err; 3205 } 3206 err = __check_stack_boundary(env, regno, max_off, access_size, 3207 zero_size_allowed); 3208 if (err) { 3209 verbose(env, "R%d max value is outside of stack bound\n", 3210 regno); 3211 return err; 3212 } 3213 } 3214 3215 if (meta && meta->raw_mode) { 3216 meta->access_size = access_size; 3217 meta->regno = regno; 3218 return 0; 3219 } 3220 3221 for (i = min_off; i < max_off + access_size; i++) { 3222 u8 *stype; 3223 3224 slot = -i - 1; 3225 spi = slot / BPF_REG_SIZE; 3226 if (state->allocated_stack <= slot) 3227 goto err; 3228 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 3229 if (*stype == STACK_MISC) 3230 goto mark; 3231 if (*stype == STACK_ZERO) { 3232 /* helper can write anything into the stack */ 3233 *stype = STACK_MISC; 3234 goto mark; 3235 } 3236 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3237 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { 3238 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 3239 for (j = 0; j < BPF_REG_SIZE; j++) 3240 state->stack[spi].slot_type[j] = STACK_MISC; 3241 goto mark; 3242 } 3243 3244 err: 3245 if (tnum_is_const(reg->var_off)) { 3246 verbose(env, "invalid indirect read from stack off %d+%d size %d\n", 3247 min_off, i - min_off, access_size); 3248 } else { 3249 char tn_buf[48]; 3250 3251 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3252 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", 3253 tn_buf, i - min_off, access_size); 3254 } 3255 return -EACCES; 3256 mark: 3257 /* reading any byte out of 8-byte 'spill_slot' will cause 3258 * the whole slot to be marked as 'read' 3259 */ 3260 mark_reg_read(env, &state->stack[spi].spilled_ptr, 3261 state->stack[spi].spilled_ptr.parent, 3262 REG_LIVE_READ64); 3263 } 3264 return update_stack_depth(env, state, min_off); 3265 } 3266 3267 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 3268 int access_size, bool zero_size_allowed, 3269 struct bpf_call_arg_meta *meta) 3270 { 3271 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3272 3273 switch (reg->type) { 3274 case PTR_TO_PACKET: 3275 case PTR_TO_PACKET_META: 3276 return check_packet_access(env, regno, reg->off, access_size, 3277 zero_size_allowed); 3278 case PTR_TO_MAP_VALUE: 3279 if (check_map_access_type(env, regno, reg->off, access_size, 3280 meta && meta->raw_mode ? 
BPF_WRITE : 3281 BPF_READ)) 3282 return -EACCES; 3283 return check_map_access(env, regno, reg->off, access_size, 3284 zero_size_allowed); 3285 default: /* scalar_value|ptr_to_stack or invalid ptr */ 3286 return check_stack_boundary(env, regno, access_size, 3287 zero_size_allowed, meta); 3288 } 3289 } 3290 3291 /* Implementation details: 3292 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL 3293 * Two bpf_map_lookups (even with the same key) will have different reg->id. 3294 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after 3295 * value_or_null->value transition, since the verifier only cares about 3296 * the range of access to a valid map value pointer and doesn't care about 3297 * the actual address of the map element. 3298 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 3299 * reg->id > 0 after value_or_null->value transition. By doing so 3300 * two bpf_map_lookups will be considered two different pointers that 3301 * point to different bpf_spin_locks. 3302 * The verifier allows taking only one bpf_spin_lock at a time to avoid 3303 * deadlocks. 3304 * Since only one bpf_spin_lock is allowed the checks are simpler than 3305 * the reg_is_refcounted() logic. The verifier needs to remember only 3306 * one spin_lock instead of an array of acquired_refs. 3307 * cur_state->active_spin_lock remembers which map value element got locked 3308 * and clears it after bpf_spin_unlock. 3309 */ 3310 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 3311 bool is_lock) 3312 { 3313 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 3314 struct bpf_verifier_state *cur = env->cur_state; 3315 bool is_const = tnum_is_const(reg->var_off); 3316 struct bpf_map *map = reg->map_ptr; 3317 u64 val = reg->var_off.value; 3318 3319 if (reg->type != PTR_TO_MAP_VALUE) { 3320 verbose(env, "R%d is not a pointer to map_value\n", regno); 3321 return -EINVAL; 3322 } 3323 if (!is_const) { 3324 verbose(env, 3325 "R%d doesn't have constant offset.
bpf_spin_lock has to be at the constant offset\n", 3326 regno); 3327 return -EINVAL; 3328 } 3329 if (!map->btf) { 3330 verbose(env, 3331 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 3332 map->name); 3333 return -EINVAL; 3334 } 3335 if (!map_value_has_spin_lock(map)) { 3336 if (map->spin_lock_off == -E2BIG) 3337 verbose(env, 3338 "map '%s' has more than one 'struct bpf_spin_lock'\n", 3339 map->name); 3340 else if (map->spin_lock_off == -ENOENT) 3341 verbose(env, 3342 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 3343 map->name); 3344 else 3345 verbose(env, 3346 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 3347 map->name); 3348 return -EINVAL; 3349 } 3350 if (map->spin_lock_off != val + reg->off) { 3351 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 3352 val + reg->off); 3353 return -EINVAL; 3354 } 3355 if (is_lock) { 3356 if (cur->active_spin_lock) { 3357 verbose(env, 3358 "Locking two bpf_spin_locks is not allowed\n"); 3359 return -EINVAL; 3360 } 3361 cur->active_spin_lock = reg->id; 3362 } else { 3363 if (!cur->active_spin_lock) { 3364 verbose(env, "bpf_spin_unlock without taking a lock\n"); 3365 return -EINVAL; 3366 } 3367 if (cur->active_spin_lock != reg->id) { 3368 verbose(env, "bpf_spin_unlock of different lock\n"); 3369 return -EINVAL; 3370 } 3371 cur->active_spin_lock = 0; 3372 } 3373 return 0; 3374 } 3375 3376 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 3377 { 3378 return type == ARG_PTR_TO_MEM || 3379 type == ARG_PTR_TO_MEM_OR_NULL || 3380 type == ARG_PTR_TO_UNINIT_MEM; 3381 } 3382 3383 static bool arg_type_is_mem_size(enum bpf_arg_type type) 3384 { 3385 return type == ARG_CONST_SIZE || 3386 type == ARG_CONST_SIZE_OR_ZERO; 3387 } 3388 3389 static bool arg_type_is_int_ptr(enum bpf_arg_type type) 3390 { 3391 return type == ARG_PTR_TO_INT || 3392 type == ARG_PTR_TO_LONG; 3393 } 3394 3395 static int int_ptr_type_to_size(enum bpf_arg_type type) 3396 { 3397 if (type == ARG_PTR_TO_INT) 3398 return sizeof(u32); 3399 else if (type == ARG_PTR_TO_LONG) 3400 return sizeof(u64); 3401 3402 return -EINVAL; 3403 } 3404 3405 static int check_func_arg(struct bpf_verifier_env *env, u32 regno, 3406 enum bpf_arg_type arg_type, 3407 struct bpf_call_arg_meta *meta) 3408 { 3409 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 3410 enum bpf_reg_type expected_type, type = reg->type; 3411 int err = 0; 3412 3413 if (arg_type == ARG_DONTCARE) 3414 return 0; 3415 3416 err = check_reg_arg(env, regno, SRC_OP); 3417 if (err) 3418 return err; 3419 3420 if (arg_type == ARG_ANYTHING) { 3421 if (is_pointer_value(env, regno)) { 3422 verbose(env, "R%d leaks addr into helper function\n", 3423 regno); 3424 return -EACCES; 3425 } 3426 return 0; 3427 } 3428 3429 if (type_is_pkt_pointer(type) && 3430 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 3431 verbose(env, "helper access to the packet is not allowed\n"); 3432 return -EACCES; 3433 } 3434 3435 if (arg_type == ARG_PTR_TO_MAP_KEY || 3436 arg_type == ARG_PTR_TO_MAP_VALUE || 3437 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || 3438 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { 3439 expected_type = PTR_TO_STACK; 3440 if (register_is_null(reg) && 3441 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) 3442 /* final test in check_stack_boundary() */; 3443 else if (!type_is_pkt_pointer(type) && 3444 type != PTR_TO_MAP_VALUE && 3445 type != expected_type) 3446 goto err_type; 3447 } else if (arg_type == ARG_CONST_SIZE || 3448 arg_type == ARG_CONST_SIZE_OR_ZERO) { 3449 expected_type = SCALAR_VALUE; 3450 if
(type != expected_type) 3451 goto err_type; 3452 } else if (arg_type == ARG_CONST_MAP_PTR) { 3453 expected_type = CONST_PTR_TO_MAP; 3454 if (type != expected_type) 3455 goto err_type; 3456 } else if (arg_type == ARG_PTR_TO_CTX) { 3457 expected_type = PTR_TO_CTX; 3458 if (type != expected_type) 3459 goto err_type; 3460 err = check_ctx_reg(env, reg, regno); 3461 if (err < 0) 3462 return err; 3463 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { 3464 expected_type = PTR_TO_SOCK_COMMON; 3465 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ 3466 if (!type_is_sk_pointer(type)) 3467 goto err_type; 3468 if (reg->ref_obj_id) { 3469 if (meta->ref_obj_id) { 3470 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 3471 regno, reg->ref_obj_id, 3472 meta->ref_obj_id); 3473 return -EFAULT; 3474 } 3475 meta->ref_obj_id = reg->ref_obj_id; 3476 } 3477 } else if (arg_type == ARG_PTR_TO_SOCKET) { 3478 expected_type = PTR_TO_SOCKET; 3479 if (type != expected_type) 3480 goto err_type; 3481 } else if (arg_type == ARG_PTR_TO_BTF_ID) { 3482 expected_type = PTR_TO_BTF_ID; 3483 if (type != expected_type) 3484 goto err_type; 3485 if (reg->btf_id != meta->btf_id) { 3486 verbose(env, "Helper has type %s got %s in R%d\n", 3487 kernel_type_name(meta->btf_id), 3488 kernel_type_name(reg->btf_id), regno); 3489 3490 return -EACCES; 3491 } 3492 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) { 3493 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", 3494 regno); 3495 return -EACCES; 3496 } 3497 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 3498 if (meta->func_id == BPF_FUNC_spin_lock) { 3499 if (process_spin_lock(env, regno, true)) 3500 return -EACCES; 3501 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 3502 if (process_spin_lock(env, regno, false)) 3503 return -EACCES; 3504 } else { 3505 verbose(env, "verifier internal error\n"); 3506 return -EFAULT; 3507 } 3508 } else if (arg_type_is_mem_ptr(arg_type)) { 3509 expected_type = PTR_TO_STACK; 3510 /* One exception here. In case function allows for NULL to be 3511 * passed in as argument, it's a SCALAR_VALUE type. Final test 3512 * happens during stack boundary checking. 3513 */ 3514 if (register_is_null(reg) && 3515 arg_type == ARG_PTR_TO_MEM_OR_NULL) 3516 /* final test in check_stack_boundary() */; 3517 else if (!type_is_pkt_pointer(type) && 3518 type != PTR_TO_MAP_VALUE && 3519 type != expected_type) 3520 goto err_type; 3521 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 3522 } else if (arg_type_is_int_ptr(arg_type)) { 3523 expected_type = PTR_TO_STACK; 3524 if (!type_is_pkt_pointer(type) && 3525 type != PTR_TO_MAP_VALUE && 3526 type != expected_type) 3527 goto err_type; 3528 } else { 3529 verbose(env, "unsupported arg_type %d\n", arg_type); 3530 return -EFAULT; 3531 } 3532 3533 if (arg_type == ARG_CONST_MAP_PTR) { 3534 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 3535 meta->map_ptr = reg->map_ptr; 3536 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 3537 /* bpf_map_xxx(..., map_ptr, ..., key) call: 3538 * check that [key, key + map->key_size) are within 3539 * stack limits and initialized 3540 */ 3541 if (!meta->map_ptr) { 3542 /* in function declaration map_ptr must come before 3543 * map_key, so that it's verified and known before 3544 * we have to check map_key here. 
Otherwise it means 3545 * that kernel subsystem misconfigured verifier 3546 */ 3547 verbose(env, "invalid map_ptr to access map->key\n"); 3548 return -EACCES; 3549 } 3550 err = check_helper_mem_access(env, regno, 3551 meta->map_ptr->key_size, false, 3552 NULL); 3553 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 3554 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 3555 !register_is_null(reg)) || 3556 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 3557 /* bpf_map_xxx(..., map_ptr, ..., value) call: 3558 * check [value, value + map->value_size) validity 3559 */ 3560 if (!meta->map_ptr) { 3561 /* kernel subsystem misconfigured verifier */ 3562 verbose(env, "invalid map_ptr to access map->value\n"); 3563 return -EACCES; 3564 } 3565 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 3566 err = check_helper_mem_access(env, regno, 3567 meta->map_ptr->value_size, false, 3568 meta); 3569 } else if (arg_type_is_mem_size(arg_type)) { 3570 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 3571 3572 /* remember the mem_size which may be used later 3573 * to refine return values. 3574 */ 3575 meta->msize_smax_value = reg->smax_value; 3576 meta->msize_umax_value = reg->umax_value; 3577 3578 /* The register is SCALAR_VALUE; the access check 3579 * happens using its boundaries. 3580 */ 3581 if (!tnum_is_const(reg->var_off)) 3582 /* For unprivileged variable accesses, disable raw 3583 * mode so that the program is required to 3584 * initialize all the memory that the helper could 3585 * just partially fill up. 3586 */ 3587 meta = NULL; 3588 3589 if (reg->smin_value < 0) { 3590 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 3591 regno); 3592 return -EACCES; 3593 } 3594 3595 if (reg->umin_value == 0) { 3596 err = check_helper_mem_access(env, regno - 1, 0, 3597 zero_size_allowed, 3598 meta); 3599 if (err) 3600 return err; 3601 } 3602 3603 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 3604 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 3605 regno); 3606 return -EACCES; 3607 } 3608 err = check_helper_mem_access(env, regno - 1, 3609 reg->umax_value, 3610 zero_size_allowed, meta); 3611 if (!err) 3612 err = mark_chain_precision(env, regno); 3613 } else if (arg_type_is_int_ptr(arg_type)) { 3614 int size = int_ptr_type_to_size(arg_type); 3615 3616 err = check_helper_mem_access(env, regno, size, false, meta); 3617 if (err) 3618 return err; 3619 err = check_ptr_alignment(env, reg, 0, size, true); 3620 } 3621 3622 return err; 3623 err_type: 3624 verbose(env, "R%d type=%s expected=%s\n", regno, 3625 reg_type_str[type], reg_type_str[expected_type]); 3626 return -EACCES; 3627 } 3628 3629 static int check_map_func_compatibility(struct bpf_verifier_env *env, 3630 struct bpf_map *map, int func_id) 3631 { 3632 if (!map) 3633 return 0; 3634 3635 /* We need a two way check, first is from map perspective ... 
*/ 3636 switch (map->map_type) { 3637 case BPF_MAP_TYPE_PROG_ARRAY: 3638 if (func_id != BPF_FUNC_tail_call) 3639 goto error; 3640 break; 3641 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 3642 if (func_id != BPF_FUNC_perf_event_read && 3643 func_id != BPF_FUNC_perf_event_output && 3644 func_id != BPF_FUNC_skb_output && 3645 func_id != BPF_FUNC_perf_event_read_value) 3646 goto error; 3647 break; 3648 case BPF_MAP_TYPE_STACK_TRACE: 3649 if (func_id != BPF_FUNC_get_stackid) 3650 goto error; 3651 break; 3652 case BPF_MAP_TYPE_CGROUP_ARRAY: 3653 if (func_id != BPF_FUNC_skb_under_cgroup && 3654 func_id != BPF_FUNC_current_task_under_cgroup) 3655 goto error; 3656 break; 3657 case BPF_MAP_TYPE_CGROUP_STORAGE: 3658 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 3659 if (func_id != BPF_FUNC_get_local_storage) 3660 goto error; 3661 break; 3662 case BPF_MAP_TYPE_DEVMAP: 3663 case BPF_MAP_TYPE_DEVMAP_HASH: 3664 if (func_id != BPF_FUNC_redirect_map && 3665 func_id != BPF_FUNC_map_lookup_elem) 3666 goto error; 3667 break; 3668 /* Restrict bpf side of cpumap and xskmap, open when use-cases 3669 * appear. 3670 */ 3671 case BPF_MAP_TYPE_CPUMAP: 3672 if (func_id != BPF_FUNC_redirect_map) 3673 goto error; 3674 break; 3675 case BPF_MAP_TYPE_XSKMAP: 3676 if (func_id != BPF_FUNC_redirect_map && 3677 func_id != BPF_FUNC_map_lookup_elem) 3678 goto error; 3679 break; 3680 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 3681 case BPF_MAP_TYPE_HASH_OF_MAPS: 3682 if (func_id != BPF_FUNC_map_lookup_elem) 3683 goto error; 3684 break; 3685 case BPF_MAP_TYPE_SOCKMAP: 3686 if (func_id != BPF_FUNC_sk_redirect_map && 3687 func_id != BPF_FUNC_sock_map_update && 3688 func_id != BPF_FUNC_map_delete_elem && 3689 func_id != BPF_FUNC_msg_redirect_map) 3690 goto error; 3691 break; 3692 case BPF_MAP_TYPE_SOCKHASH: 3693 if (func_id != BPF_FUNC_sk_redirect_hash && 3694 func_id != BPF_FUNC_sock_hash_update && 3695 func_id != BPF_FUNC_map_delete_elem && 3696 func_id != BPF_FUNC_msg_redirect_hash) 3697 goto error; 3698 break; 3699 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 3700 if (func_id != BPF_FUNC_sk_select_reuseport) 3701 goto error; 3702 break; 3703 case BPF_MAP_TYPE_QUEUE: 3704 case BPF_MAP_TYPE_STACK: 3705 if (func_id != BPF_FUNC_map_peek_elem && 3706 func_id != BPF_FUNC_map_pop_elem && 3707 func_id != BPF_FUNC_map_push_elem) 3708 goto error; 3709 break; 3710 case BPF_MAP_TYPE_SK_STORAGE: 3711 if (func_id != BPF_FUNC_sk_storage_get && 3712 func_id != BPF_FUNC_sk_storage_delete) 3713 goto error; 3714 break; 3715 default: 3716 break; 3717 } 3718 3719 /* ... and second from the function itself. 
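 * E.g. the BPF_MAP_TYPE_PROG_ARRAY case above says "only bpf_tail_call()
 * may consume this map", while the BPF_FUNC_tail_call case below says
 * "this helper only accepts a PROG_ARRAY". Either direction alone could
 * develop holes as new map types and helpers are added.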
*/ 3720 switch (func_id) { 3721 case BPF_FUNC_tail_call: 3722 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 3723 goto error; 3724 if (env->subprog_cnt > 1) { 3725 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); 3726 return -EINVAL; 3727 } 3728 break; 3729 case BPF_FUNC_perf_event_read: 3730 case BPF_FUNC_perf_event_output: 3731 case BPF_FUNC_perf_event_read_value: 3732 case BPF_FUNC_skb_output: 3733 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 3734 goto error; 3735 break; 3736 case BPF_FUNC_get_stackid: 3737 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 3738 goto error; 3739 break; 3740 case BPF_FUNC_current_task_under_cgroup: 3741 case BPF_FUNC_skb_under_cgroup: 3742 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 3743 goto error; 3744 break; 3745 case BPF_FUNC_redirect_map: 3746 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 3747 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 3748 map->map_type != BPF_MAP_TYPE_CPUMAP && 3749 map->map_type != BPF_MAP_TYPE_XSKMAP) 3750 goto error; 3751 break; 3752 case BPF_FUNC_sk_redirect_map: 3753 case BPF_FUNC_msg_redirect_map: 3754 case BPF_FUNC_sock_map_update: 3755 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 3756 goto error; 3757 break; 3758 case BPF_FUNC_sk_redirect_hash: 3759 case BPF_FUNC_msg_redirect_hash: 3760 case BPF_FUNC_sock_hash_update: 3761 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 3762 goto error; 3763 break; 3764 case BPF_FUNC_get_local_storage: 3765 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 3766 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 3767 goto error; 3768 break; 3769 case BPF_FUNC_sk_select_reuseport: 3770 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) 3771 goto error; 3772 break; 3773 case BPF_FUNC_map_peek_elem: 3774 case BPF_FUNC_map_pop_elem: 3775 case BPF_FUNC_map_push_elem: 3776 if (map->map_type != BPF_MAP_TYPE_QUEUE && 3777 map->map_type != BPF_MAP_TYPE_STACK) 3778 goto error; 3779 break; 3780 case BPF_FUNC_sk_storage_get: 3781 case BPF_FUNC_sk_storage_delete: 3782 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 3783 goto error; 3784 break; 3785 default: 3786 break; 3787 } 3788 3789 return 0; 3790 error: 3791 verbose(env, "cannot pass map_type %d into func %s#%d\n", 3792 map->map_type, func_id_name(func_id), func_id); 3793 return -EINVAL; 3794 } 3795 3796 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 3797 { 3798 int count = 0; 3799 3800 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 3801 count++; 3802 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 3803 count++; 3804 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 3805 count++; 3806 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 3807 count++; 3808 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 3809 count++; 3810 3811 /* We only support one arg being in raw mode at the moment, 3812 * which is sufficient for the helper functions we have 3813 * right now. 3814 */ 3815 return count <= 1; 3816 } 3817 3818 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 3819 enum bpf_arg_type arg_next) 3820 { 3821 return (arg_type_is_mem_ptr(arg_curr) && 3822 !arg_type_is_mem_size(arg_next)) || 3823 (!arg_type_is_mem_ptr(arg_curr) && 3824 arg_type_is_mem_size(arg_next)); 3825 } 3826 3827 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 3828 { 3829 /* bpf_xxx(..., buf, len) call will access 'len' 3830 * bytes from memory 'buf'. Both arg types need 3831 * to be paired, so make sure there's no buggy 3832 * helper function specification. 
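 * A conforming prototype pairs each mem ptr with the size argument that
 * immediately follows it, e.g. (illustrative):
 *   .arg1_type = ARG_PTR_TO_UNINIT_MEM,
 *   .arg2_type = ARG_CONST_SIZE_OR_ZERO,
 * as bpf_probe_read() does; a size argument with no buffer right before
 * it (or the reverse) is rejected here.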
3833 */ 3834 if (arg_type_is_mem_size(fn->arg1_type) || 3835 arg_type_is_mem_ptr(fn->arg5_type) || 3836 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 3837 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 3838 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 3839 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 3840 return false; 3841 3842 return true; 3843 } 3844 3845 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) 3846 { 3847 int count = 0; 3848 3849 if (arg_type_may_be_refcounted(fn->arg1_type)) 3850 count++; 3851 if (arg_type_may_be_refcounted(fn->arg2_type)) 3852 count++; 3853 if (arg_type_may_be_refcounted(fn->arg3_type)) 3854 count++; 3855 if (arg_type_may_be_refcounted(fn->arg4_type)) 3856 count++; 3857 if (arg_type_may_be_refcounted(fn->arg5_type)) 3858 count++; 3859 3860 /* A reference acquiring function cannot acquire 3861 * another refcounted ptr. 3862 */ 3863 if (is_acquire_function(func_id) && count) 3864 return false; 3865 3866 /* We only support one arg being refcounted at the moment, 3867 * which is sufficient for the helper functions we have right now. 3868 */ 3869 return count <= 1; 3870 } 3871 3872 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 3873 { 3874 return check_raw_mode_ok(fn) && 3875 check_arg_pair_ok(fn) && 3876 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; 3877 } 3878 3879 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 3880 * are now invalid, so turn them into unknown SCALAR_VALUE. 3881 */ 3882 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 3883 struct bpf_func_state *state) 3884 { 3885 struct bpf_reg_state *regs = state->regs, *reg; 3886 int i; 3887 3888 for (i = 0; i < MAX_BPF_REG; i++) 3889 if (reg_is_pkt_pointer_any(&regs[i])) 3890 mark_reg_unknown(env, regs, i); 3891 3892 bpf_for_each_spilled_reg(i, state, reg) { 3893 if (!reg) 3894 continue; 3895 if (reg_is_pkt_pointer_any(reg)) 3896 __mark_reg_unknown(env, reg); 3897 } 3898 } 3899 3900 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 3901 { 3902 struct bpf_verifier_state *vstate = env->cur_state; 3903 int i; 3904 3905 for (i = 0; i <= vstate->curframe; i++) 3906 __clear_all_pkt_pointers(env, vstate->frame[i]); 3907 } 3908 3909 static void release_reg_references(struct bpf_verifier_env *env, 3910 struct bpf_func_state *state, 3911 int ref_obj_id) 3912 { 3913 struct bpf_reg_state *regs = state->regs, *reg; 3914 int i; 3915 3916 for (i = 0; i < MAX_BPF_REG; i++) 3917 if (regs[i].ref_obj_id == ref_obj_id) 3918 mark_reg_unknown(env, regs, i); 3919 3920 bpf_for_each_spilled_reg(i, state, reg) { 3921 if (!reg) 3922 continue; 3923 if (reg->ref_obj_id == ref_obj_id) 3924 __mark_reg_unknown(env, reg); 3925 } 3926 } 3927 3928 /* The pointer with the specified id has released its reference to kernel 3929 * resources. Identify all copies of the same pointer and clear the reference.
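 * For example (illustrative sequence):
 *   r6 = bpf_sk_lookup_tcp(...)   // returned ptr gets ref_obj_id N
 *   r7 = r6                       // the copy shares ref_obj_id N
 *   bpf_sk_release(r6)            // N is released; r6 and r7 both
 *                                 // become unknown scalars below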
3930 */ 3931 static int release_reference(struct bpf_verifier_env *env, 3932 int ref_obj_id) 3933 { 3934 struct bpf_verifier_state *vstate = env->cur_state; 3935 int err; 3936 int i; 3937 3938 err = release_reference_state(cur_func(env), ref_obj_id); 3939 if (err) 3940 return err; 3941 3942 for (i = 0; i <= vstate->curframe; i++) 3943 release_reg_references(env, vstate->frame[i], ref_obj_id); 3944 3945 return 0; 3946 } 3947 3948 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 3949 int *insn_idx) 3950 { 3951 struct bpf_verifier_state *state = env->cur_state; 3952 struct bpf_func_state *caller, *callee; 3953 int i, err, subprog, target_insn; 3954 3955 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 3956 verbose(env, "the call stack of %d frames is too deep\n", 3957 state->curframe + 2); 3958 return -E2BIG; 3959 } 3960 3961 target_insn = *insn_idx + insn->imm; 3962 subprog = find_subprog(env, target_insn + 1); 3963 if (subprog < 0) { 3964 verbose(env, "verifier bug. No program starts at insn %d\n", 3965 target_insn + 1); 3966 return -EFAULT; 3967 } 3968 3969 caller = state->frame[state->curframe]; 3970 if (state->frame[state->curframe + 1]) { 3971 verbose(env, "verifier bug. Frame %d already allocated\n", 3972 state->curframe + 1); 3973 return -EFAULT; 3974 } 3975 3976 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 3977 if (!callee) 3978 return -ENOMEM; 3979 state->frame[state->curframe + 1] = callee; 3980 3981 /* callee cannot access r0, r6 - r9 for reading and has to write 3982 * into its own stack before reading from it. 3983 * callee can read/write into caller's stack 3984 */ 3985 init_func_state(env, callee, 3986 /* remember the callsite, it will be used by bpf_exit */ 3987 *insn_idx /* callsite */, 3988 state->curframe + 1 /* frameno within this callchain */, 3989 subprog /* subprog number within this prog */); 3990 3991 /* Transfer references to the callee */ 3992 err = transfer_reference_state(callee, caller); 3993 if (err) 3994 return err; 3995 3996 /* copy r1 - r5 args that callee can access. The copy includes parent 3997 * pointers, which connects us up to the liveness chain 3998 */ 3999 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 4000 callee->regs[i] = caller->regs[i]; 4001 4002 /* after the call registers r0 - r5 were scratched */ 4003 for (i = 0; i < CALLER_SAVED_REGS; i++) { 4004 mark_reg_not_init(env, caller->regs, caller_saved[i]); 4005 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 4006 } 4007 4008 /* only increment it after check_reg_arg() finished */ 4009 state->curframe++; 4010 4011 if (btf_check_func_arg_match(env, subprog)) 4012 return -EINVAL; 4013 4014 /* and go analyze first insn of the callee */ 4015 *insn_idx = target_insn; 4016 4017 if (env->log.level & BPF_LOG_LEVEL) { 4018 verbose(env, "caller:\n"); 4019 print_verifier_state(env, caller); 4020 verbose(env, "callee:\n"); 4021 print_verifier_state(env, callee); 4022 } 4023 return 0; 4024 } 4025 4026 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 4027 { 4028 struct bpf_verifier_state *state = env->cur_state; 4029 struct bpf_func_state *caller, *callee; 4030 struct bpf_reg_state *r0; 4031 int err; 4032 4033 callee = state->frame[state->curframe]; 4034 r0 = &callee->regs[BPF_REG_0]; 4035 if (r0->type == PTR_TO_STACK) { 4036 /* technically it's ok to return caller's stack pointer 4037 * (or caller's caller's pointer) back to the caller, 4038 * since these pointers are valid. 
Only current stack 4039 * pointer will be invalid as soon as function exits, 4040 * but let's be conservative 4041 */ 4042 verbose(env, "cannot return stack pointer to the caller\n"); 4043 return -EINVAL; 4044 } 4045 4046 state->curframe--; 4047 caller = state->frame[state->curframe]; 4048 /* return to the caller whatever r0 had in the callee */ 4049 caller->regs[BPF_REG_0] = *r0; 4050 4051 /* Transfer references to the caller */ 4052 err = transfer_reference_state(caller, callee); 4053 if (err) 4054 return err; 4055 4056 *insn_idx = callee->callsite + 1; 4057 if (env->log.level & BPF_LOG_LEVEL) { 4058 verbose(env, "returning from callee:\n"); 4059 print_verifier_state(env, callee); 4060 verbose(env, "to caller at %d:\n", *insn_idx); 4061 print_verifier_state(env, caller); 4062 } 4063 /* clear everything in the callee */ 4064 free_func_state(callee); 4065 state->frame[state->curframe + 1] = NULL; 4066 return 0; 4067 } 4068 4069 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, 4070 int func_id, 4071 struct bpf_call_arg_meta *meta) 4072 { 4073 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; 4074 4075 if (ret_type != RET_INTEGER || 4076 (func_id != BPF_FUNC_get_stack && 4077 func_id != BPF_FUNC_probe_read_str)) 4078 return; 4079 4080 ret_reg->smax_value = meta->msize_smax_value; 4081 ret_reg->umax_value = meta->msize_umax_value; 4082 __reg_deduce_bounds(ret_reg); 4083 __reg_bound_offset(ret_reg); 4084 } 4085 4086 static int 4087 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 4088 int func_id, int insn_idx) 4089 { 4090 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 4091 struct bpf_map *map = meta->map_ptr; 4092 4093 if (func_id != BPF_FUNC_tail_call && 4094 func_id != BPF_FUNC_map_lookup_elem && 4095 func_id != BPF_FUNC_map_update_elem && 4096 func_id != BPF_FUNC_map_delete_elem && 4097 func_id != BPF_FUNC_map_push_elem && 4098 func_id != BPF_FUNC_map_pop_elem && 4099 func_id != BPF_FUNC_map_peek_elem) 4100 return 0; 4101 4102 if (map == NULL) { 4103 verbose(env, "kernel subsystem misconfigured verifier\n"); 4104 return -EINVAL; 4105 } 4106 4107 /* In case of read-only, some additional restrictions 4108 * need to be applied in order to prevent altering the 4109 * state of the map from program side.
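 * E.g. a map created with BPF_F_RDONLY_PROG may still be read through
 * bpf_map_lookup_elem(), but mutating helpers such as
 * bpf_map_update_elem() have to be refused below.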
4110 */ 4111 if ((map->map_flags & BPF_F_RDONLY_PROG) && 4112 (func_id == BPF_FUNC_map_delete_elem || 4113 func_id == BPF_FUNC_map_update_elem || 4114 func_id == BPF_FUNC_map_push_elem || 4115 func_id == BPF_FUNC_map_pop_elem)) { 4116 verbose(env, "write into map forbidden\n"); 4117 return -EACCES; 4118 } 4119 4120 if (!BPF_MAP_PTR(aux->map_ptr_state)) 4121 bpf_map_ptr_store(aux, meta->map_ptr, 4122 meta->map_ptr->unpriv_array); 4123 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) 4124 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, 4125 meta->map_ptr->unpriv_array); 4126 return 0; 4127 } 4128 4129 static int 4130 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 4131 int func_id, int insn_idx) 4132 { 4133 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 4134 struct bpf_reg_state *regs = cur_regs(env), *reg; 4135 struct bpf_map *map = meta->map_ptr; 4136 struct tnum range; 4137 u64 val; 4138 int err; 4139 4140 if (func_id != BPF_FUNC_tail_call) 4141 return 0; 4142 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { 4143 verbose(env, "kernel subsystem misconfigured verifier\n"); 4144 return -EINVAL; 4145 } 4146 4147 range = tnum_range(0, map->max_entries - 1); 4148 reg = &regs[BPF_REG_3]; 4149 4150 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { 4151 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 4152 return 0; 4153 } 4154 4155 err = mark_chain_precision(env, BPF_REG_3); 4156 if (err) 4157 return err; 4158 4159 val = reg->var_off.value; 4160 if (bpf_map_key_unseen(aux)) 4161 bpf_map_key_store(aux, val); 4162 else if (!bpf_map_key_poisoned(aux) && 4163 bpf_map_key_immediate(aux) != val) 4164 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 4165 return 0; 4166 } 4167 4168 static int check_reference_leak(struct bpf_verifier_env *env) 4169 { 4170 struct bpf_func_state *state = cur_func(env); 4171 int i; 4172 4173 for (i = 0; i < state->acquired_refs; i++) { 4174 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", 4175 state->refs[i].id, state->refs[i].insn_idx); 4176 } 4177 return state->acquired_refs ? -EINVAL : 0; 4178 } 4179 4180 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 4181 { 4182 const struct bpf_func_proto *fn = NULL; 4183 struct bpf_reg_state *regs; 4184 struct bpf_call_arg_meta meta; 4185 bool changes_data; 4186 int i, err; 4187 4188 /* find function prototype */ 4189 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 4190 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 4191 func_id); 4192 return -EINVAL; 4193 } 4194 4195 if (env->ops->get_func_proto) 4196 fn = env->ops->get_func_proto(func_id, env->prog); 4197 if (!fn) { 4198 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 4199 func_id); 4200 return -EINVAL; 4201 } 4202 4203 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 4204 if (!env->prog->gpl_compatible && fn->gpl_only) { 4205 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 4206 return -EINVAL; 4207 } 4208 4209 /* With LD_ABS/IND some JITs save/restore skb from r1.
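 * Such a helper must therefore take the context in r1, so those JITs
 * can re-cache the skb pointers around the call (enforced below).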
*/ 4210 changes_data = bpf_helper_changes_pkt_data(fn->func); 4211 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 4212 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 4213 func_id_name(func_id), func_id); 4214 return -EINVAL; 4215 } 4216 4217 memset(&meta, 0, sizeof(meta)); 4218 meta.pkt_access = fn->pkt_access; 4219 4220 err = check_func_proto(fn, func_id); 4221 if (err) { 4222 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 4223 func_id_name(func_id), func_id); 4224 return err; 4225 } 4226 4227 meta.func_id = func_id; 4228 /* check args */ 4229 for (i = 0; i < 5; i++) { 4230 err = btf_resolve_helper_id(&env->log, fn, i); 4231 if (err > 0) 4232 meta.btf_id = err; 4233 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta); 4234 if (err) 4235 return err; 4236 } 4237 4238 err = record_func_map(env, &meta, func_id, insn_idx); 4239 if (err) 4240 return err; 4241 4242 err = record_func_key(env, &meta, func_id, insn_idx); 4243 if (err) 4244 return err; 4245 4246 /* Mark slots with STACK_MISC in case of raw mode, stack offset 4247 * is inferred from register state. 4248 */ 4249 for (i = 0; i < meta.access_size; i++) { 4250 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 4251 BPF_WRITE, -1, false); 4252 if (err) 4253 return err; 4254 } 4255 4256 if (func_id == BPF_FUNC_tail_call) { 4257 err = check_reference_leak(env); 4258 if (err) { 4259 verbose(env, "tail_call would lead to reference leak\n"); 4260 return err; 4261 } 4262 } else if (is_release_function(func_id)) { 4263 err = release_reference(env, meta.ref_obj_id); 4264 if (err) { 4265 verbose(env, "func %s#%d reference has not been acquired before\n", 4266 func_id_name(func_id), func_id); 4267 return err; 4268 } 4269 } 4270 4271 regs = cur_regs(env); 4272 4273 /* check that flags argument in get_local_storage(map, flags) is 0, 4274 * this is required because get_local_storage() can't return an error. 4275 */ 4276 if (func_id == BPF_FUNC_get_local_storage && 4277 !register_is_null(&regs[BPF_REG_2])) { 4278 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); 4279 return -EACCES; 4280 } 4281 4282 /* reset caller saved regs */ 4283 for (i = 0; i < CALLER_SAVED_REGS; i++) { 4284 mark_reg_not_init(env, regs, caller_saved[i]); 4285 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 4286 } 4287 4288 /* helper call returns 64-bit value.
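 * Record r0 as a full 64-bit definition (DEF_NOT_SUBREG) so that the
 * 32-bit zero-extension tracking does not treat it as a subregister.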
*/ 4289 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 4290 4291 /* update return register (already marked as written above) */ 4292 if (fn->ret_type == RET_INTEGER) { 4293 /* sets type to SCALAR_VALUE */ 4294 mark_reg_unknown(env, regs, BPF_REG_0); 4295 } else if (fn->ret_type == RET_VOID) { 4296 regs[BPF_REG_0].type = NOT_INIT; 4297 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || 4298 fn->ret_type == RET_PTR_TO_MAP_VALUE) { 4299 /* There is no offset yet applied, variable or fixed */ 4300 mark_reg_known_zero(env, regs, BPF_REG_0); 4301 /* remember map_ptr, so that check_map_access() 4302 * can check 'value_size' boundary of memory access 4303 * to map element returned from bpf_map_lookup_elem() 4304 */ 4305 if (meta.map_ptr == NULL) { 4306 verbose(env, 4307 "kernel subsystem misconfigured verifier\n"); 4308 return -EINVAL; 4309 } 4310 regs[BPF_REG_0].map_ptr = meta.map_ptr; 4311 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { 4312 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; 4313 if (map_value_has_spin_lock(meta.map_ptr)) 4314 regs[BPF_REG_0].id = ++env->id_gen; 4315 } else { 4316 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 4317 regs[BPF_REG_0].id = ++env->id_gen; 4318 } 4319 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { 4320 mark_reg_known_zero(env, regs, BPF_REG_0); 4321 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; 4322 regs[BPF_REG_0].id = ++env->id_gen; 4323 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { 4324 mark_reg_known_zero(env, regs, BPF_REG_0); 4325 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; 4326 regs[BPF_REG_0].id = ++env->id_gen; 4327 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { 4328 mark_reg_known_zero(env, regs, BPF_REG_0); 4329 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; 4330 regs[BPF_REG_0].id = ++env->id_gen; 4331 } else { 4332 verbose(env, "unknown return type %d of func %s#%d\n", 4333 fn->ret_type, func_id_name(func_id), func_id); 4334 return -EINVAL; 4335 } 4336 4337 if (is_ptr_cast_function(func_id)) { 4338 /* For release_reference() */ 4339 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 4340 } else if (is_acquire_function(func_id)) { 4341 int id = acquire_reference_state(env, insn_idx); 4342 4343 if (id < 0) 4344 return id; 4345 /* For mark_ptr_or_null_reg() */ 4346 regs[BPF_REG_0].id = id; 4347 /* For release_reference() */ 4348 regs[BPF_REG_0].ref_obj_id = id; 4349 } 4350 4351 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 4352 4353 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 4354 if (err) 4355 return err; 4356 4357 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { 4358 const char *err_str; 4359 4360 #ifdef CONFIG_PERF_EVENTS 4361 err = get_callchain_buffers(sysctl_perf_event_max_stack); 4362 err_str = "cannot get callchain buffer for func %s#%d\n"; 4363 #else 4364 err = -ENOTSUPP; 4365 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 4366 #endif 4367 if (err) { 4368 verbose(env, err_str, func_id_name(func_id), func_id); 4369 return err; 4370 } 4371 4372 env->prog->has_callchain_buf = true; 4373 } 4374 4375 if (changes_data) 4376 clear_all_pkt_pointers(env); 4377 return 0; 4378 } 4379 4380 static bool signed_add_overflows(s64 a, s64 b) 4381 { 4382 /* Do the add in u64, where overflow is well-defined */ 4383 s64 res = (s64)((u64)a + (u64)b); 4384 4385 if (b < 0) 4386 return res > a; 4387 return res < a; 4388 } 4389 4390 static bool signed_sub_overflows(s64 a, s64 b) 4391 { 4392 /* Do the sub in u64, where overflow is well-defined */ 4393 
s64 res = (s64)((u64)a - (u64)b); 4394 4395 if (b < 0) 4396 return res < a; 4397 return res > a; 4398 } 4399 4400 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 4401 const struct bpf_reg_state *reg, 4402 enum bpf_reg_type type) 4403 { 4404 bool known = tnum_is_const(reg->var_off); 4405 s64 val = reg->var_off.value; 4406 s64 smin = reg->smin_value; 4407 4408 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 4409 verbose(env, "math between %s pointer and %lld is not allowed\n", 4410 reg_type_str[type], val); 4411 return false; 4412 } 4413 4414 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 4415 verbose(env, "%s pointer offset %d is not allowed\n", 4416 reg_type_str[type], reg->off); 4417 return false; 4418 } 4419 4420 if (smin == S64_MIN) { 4421 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 4422 reg_type_str[type]); 4423 return false; 4424 } 4425 4426 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 4427 verbose(env, "value %lld makes %s pointer be out of bounds\n", 4428 smin, reg_type_str[type]); 4429 return false; 4430 } 4431 4432 return true; 4433 } 4434 4435 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 4436 { 4437 return &env->insn_aux_data[env->insn_idx]; 4438 } 4439 4440 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 4441 u32 *ptr_limit, u8 opcode, bool off_is_neg) 4442 { 4443 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || 4444 (opcode == BPF_SUB && !off_is_neg); 4445 u32 off; 4446 4447 switch (ptr_reg->type) { 4448 case PTR_TO_STACK: 4449 /* Indirect variable offset stack access is prohibited in 4450 * unprivileged mode so it's not handled here. 4451 */ 4452 off = ptr_reg->off + ptr_reg->var_off.value; 4453 if (mask_to_left) 4454 *ptr_limit = MAX_BPF_STACK + off; 4455 else 4456 *ptr_limit = -off; 4457 return 0; 4458 case PTR_TO_MAP_VALUE: 4459 if (mask_to_left) { 4460 *ptr_limit = ptr_reg->umax_value + ptr_reg->off; 4461 } else { 4462 off = ptr_reg->smin_value + ptr_reg->off; 4463 *ptr_limit = ptr_reg->map_ptr->value_size - off; 4464 } 4465 return 0; 4466 default: 4467 return -EINVAL; 4468 } 4469 } 4470 4471 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 4472 const struct bpf_insn *insn) 4473 { 4474 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; 4475 } 4476 4477 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 4478 u32 alu_state, u32 alu_limit) 4479 { 4480 /* If we arrived here from different branches with different 4481 * state or limits to sanitize, then this won't work. 4482 */ 4483 if (aux->alu_state && 4484 (aux->alu_state != alu_state || 4485 aux->alu_limit != alu_limit)) 4486 return -EACCES; 4487 4488 /* Corresponding fixup done in fixup_bpf_calls(). 
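 * Roughly, the patched sequence computes a branch-free mask from
 * alu_limit and ANDs it into the offset, forcing an offset outside
 * the sanctioned range to zero, so that even speculatively executed
 * pointer arithmetic cannot leave the object.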
*/ 4489 aux->alu_state = alu_state; 4490 aux->alu_limit = alu_limit; 4491 return 0; 4492 } 4493 4494 static int sanitize_val_alu(struct bpf_verifier_env *env, 4495 struct bpf_insn *insn) 4496 { 4497 struct bpf_insn_aux_data *aux = cur_aux(env); 4498 4499 if (can_skip_alu_sanitation(env, insn)) 4500 return 0; 4501 4502 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 4503 } 4504 4505 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 4506 struct bpf_insn *insn, 4507 const struct bpf_reg_state *ptr_reg, 4508 struct bpf_reg_state *dst_reg, 4509 bool off_is_neg) 4510 { 4511 struct bpf_verifier_state *vstate = env->cur_state; 4512 struct bpf_insn_aux_data *aux = cur_aux(env); 4513 bool ptr_is_dst_reg = ptr_reg == dst_reg; 4514 u8 opcode = BPF_OP(insn->code); 4515 u32 alu_state, alu_limit; 4516 struct bpf_reg_state tmp; 4517 bool ret; 4518 4519 if (can_skip_alu_sanitation(env, insn)) 4520 return 0; 4521 4522 /* We already marked aux for masking from non-speculative 4523 * paths, thus we got here in the first place. We only care 4524 * to explore bad access from here. 4525 */ 4526 if (vstate->speculative) 4527 goto do_sim; 4528 4529 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 4530 alu_state |= ptr_is_dst_reg ? 4531 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 4532 4533 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 4534 return 0; 4535 if (update_alu_sanitation_state(aux, alu_state, alu_limit)) 4536 return -EACCES; 4537 do_sim: 4538 /* Simulate and find potential out-of-bounds access under 4539 * speculative execution from truncation as a result of 4540 * masking when off was not within expected range. If off 4541 * sits in dst, then we temporarily need to move ptr there 4542 * to simulate dst (== 0) +/-= ptr. Needed, for example, 4543 * for cases where we use K-based arithmetic in one direction 4544 * and truncated reg-based in the other in order to explore 4545 * bad access. 4546 */ 4547 if (!ptr_is_dst_reg) { 4548 tmp = *dst_reg; 4549 *dst_reg = *ptr_reg; 4550 } 4551 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); 4552 if (!ptr_is_dst_reg && ret) 4553 *dst_reg = tmp; 4554 return !ret ? -EFAULT : 0; 4555 } 4556 4557 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 4558 * Caller should also handle BPF_MOV case separately. 4559 * If we return -EACCES, caller may want to try again treating pointer as a 4560 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 4561 */ 4562 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 4563 struct bpf_insn *insn, 4564 const struct bpf_reg_state *ptr_reg, 4565 const struct bpf_reg_state *off_reg) 4566 { 4567 struct bpf_verifier_state *vstate = env->cur_state; 4568 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4569 struct bpf_reg_state *regs = state->regs, *dst_reg; 4570 bool known = tnum_is_const(off_reg->var_off); 4571 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 4572 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 4573 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 4574 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 4575 u32 dst = insn->dst_reg, src = insn->src_reg; 4576 u8 opcode = BPF_OP(insn->code); 4577 int ret; 4578 4579 dst_reg = ®s[dst]; 4580 4581 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 4582 smin_val > smax_val || umin_val > umax_val) { 4583 /* Taint dst register if offset had invalid bounds derived from 4584 * e.g. 
dead branches. 4585 */ 4586 __mark_reg_unknown(env, dst_reg); 4587 return 0; 4588 } 4589 4590 if (BPF_CLASS(insn->code) != BPF_ALU64) { 4591 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 4592 verbose(env, 4593 "R%d 32-bit pointer arithmetic prohibited\n", 4594 dst); 4595 return -EACCES; 4596 } 4597 4598 switch (ptr_reg->type) { 4599 case PTR_TO_MAP_VALUE_OR_NULL: 4600 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 4601 dst, reg_type_str[ptr_reg->type]); 4602 return -EACCES; 4603 case CONST_PTR_TO_MAP: 4604 case PTR_TO_PACKET_END: 4605 case PTR_TO_SOCKET: 4606 case PTR_TO_SOCKET_OR_NULL: 4607 case PTR_TO_SOCK_COMMON: 4608 case PTR_TO_SOCK_COMMON_OR_NULL: 4609 case PTR_TO_TCP_SOCK: 4610 case PTR_TO_TCP_SOCK_OR_NULL: 4611 case PTR_TO_XDP_SOCK: 4612 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 4613 dst, reg_type_str[ptr_reg->type]); 4614 return -EACCES; 4615 case PTR_TO_MAP_VALUE: 4616 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { 4617 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", 4618 off_reg == dst_reg ? dst : src); 4619 return -EACCES; 4620 } 4621 /* fall-through */ 4622 default: 4623 break; 4624 } 4625 4626 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 4627 * The id may be overwritten later if we create a new variable offset. 4628 */ 4629 dst_reg->type = ptr_reg->type; 4630 dst_reg->id = ptr_reg->id; 4631 4632 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 4633 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 4634 return -EINVAL; 4635 4636 switch (opcode) { 4637 case BPF_ADD: 4638 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 4639 if (ret < 0) { 4640 verbose(env, "R%d tried to add from different maps or paths\n", dst); 4641 return ret; 4642 } 4643 /* We can take a fixed offset as long as it doesn't overflow 4644 * the s32 'off' field 4645 */ 4646 if (known && (ptr_reg->off + smin_val == 4647 (s64)(s32)(ptr_reg->off + smin_val))) { 4648 /* pointer += K. Accumulate it into fixed offset */ 4649 dst_reg->smin_value = smin_ptr; 4650 dst_reg->smax_value = smax_ptr; 4651 dst_reg->umin_value = umin_ptr; 4652 dst_reg->umax_value = umax_ptr; 4653 dst_reg->var_off = ptr_reg->var_off; 4654 dst_reg->off = ptr_reg->off + smin_val; 4655 dst_reg->raw = ptr_reg->raw; 4656 break; 4657 } 4658 /* A new variable offset is created. Note that off_reg->off 4659 * == 0, since it's a scalar. 4660 * dst_reg gets the pointer type and since some positive 4661 * integer value was added to the pointer, give it a new 'id' 4662 * if it's a PTR_TO_PACKET. 4663 * this creates a new 'base' pointer, off_reg (variable) gets 4664 * added into the variable offset, and we copy the fixed offset 4665 * from ptr_reg. 
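 * E.g. (illustrative) ptr_reg = map_value with off=16 plus a scalar
 * known to be in [0, 31]: dst_reg keeps the fixed off=16 and the
 * pointer type, while its umin/umax/var_off absorb the [0, 31] range.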
4666 */ 4667 if (signed_add_overflows(smin_ptr, smin_val) || 4668 signed_add_overflows(smax_ptr, smax_val)) { 4669 dst_reg->smin_value = S64_MIN; 4670 dst_reg->smax_value = S64_MAX; 4671 } else { 4672 dst_reg->smin_value = smin_ptr + smin_val; 4673 dst_reg->smax_value = smax_ptr + smax_val; 4674 } 4675 if (umin_ptr + umin_val < umin_ptr || 4676 umax_ptr + umax_val < umax_ptr) { 4677 dst_reg->umin_value = 0; 4678 dst_reg->umax_value = U64_MAX; 4679 } else { 4680 dst_reg->umin_value = umin_ptr + umin_val; 4681 dst_reg->umax_value = umax_ptr + umax_val; 4682 } 4683 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 4684 dst_reg->off = ptr_reg->off; 4685 dst_reg->raw = ptr_reg->raw; 4686 if (reg_is_pkt_pointer(ptr_reg)) { 4687 dst_reg->id = ++env->id_gen; 4688 /* something was added to pkt_ptr, set range to zero */ 4689 dst_reg->raw = 0; 4690 } 4691 break; 4692 case BPF_SUB: 4693 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 4694 if (ret < 0) { 4695 verbose(env, "R%d tried to sub from different maps or paths\n", dst); 4696 return ret; 4697 } 4698 if (dst_reg == off_reg) { 4699 /* scalar -= pointer. Creates an unknown scalar */ 4700 verbose(env, "R%d tried to subtract pointer from scalar\n", 4701 dst); 4702 return -EACCES; 4703 } 4704 /* We don't allow subtraction from FP, because (according to 4705 * test_verifier.c test "invalid fp arithmetic") JITs might not 4706 * be able to deal with it. 4707 */ 4708 if (ptr_reg->type == PTR_TO_STACK) { 4709 verbose(env, "R%d subtraction from stack pointer prohibited\n", 4710 dst); 4711 return -EACCES; 4712 } 4713 if (known && (ptr_reg->off - smin_val == 4714 (s64)(s32)(ptr_reg->off - smin_val))) { 4715 /* pointer -= K. Subtract it from fixed offset */ 4716 dst_reg->smin_value = smin_ptr; 4717 dst_reg->smax_value = smax_ptr; 4718 dst_reg->umin_value = umin_ptr; 4719 dst_reg->umax_value = umax_ptr; 4720 dst_reg->var_off = ptr_reg->var_off; 4721 dst_reg->id = ptr_reg->id; 4722 dst_reg->off = ptr_reg->off - smin_val; 4723 dst_reg->raw = ptr_reg->raw; 4724 break; 4725 } 4726 /* A new variable offset is created. If the subtrahend is known 4727 * nonnegative, then any reg->range we had before is still good. 4728 */ 4729 if (signed_sub_overflows(smin_ptr, smax_val) || 4730 signed_sub_overflows(smax_ptr, smin_val)) { 4731 /* Overflow possible, we know nothing */ 4732 dst_reg->smin_value = S64_MIN; 4733 dst_reg->smax_value = S64_MAX; 4734 } else { 4735 dst_reg->smin_value = smin_ptr - smax_val; 4736 dst_reg->smax_value = smax_ptr - smin_val; 4737 } 4738 if (umin_ptr < umax_val) { 4739 /* Overflow possible, we know nothing */ 4740 dst_reg->umin_value = 0; 4741 dst_reg->umax_value = U64_MAX; 4742 } else { 4743 /* Cannot overflow (as long as bounds are consistent) */ 4744 dst_reg->umin_value = umin_ptr - umax_val; 4745 dst_reg->umax_value = umax_ptr - umin_val; 4746 } 4747 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 4748 dst_reg->off = ptr_reg->off; 4749 dst_reg->raw = ptr_reg->raw; 4750 if (reg_is_pkt_pointer(ptr_reg)) { 4751 dst_reg->id = ++env->id_gen; 4752 /* a negative subtrahend may have effectively added to pkt_ptr, set range to zero */ 4753 if (smin_val < 0) 4754 dst_reg->raw = 0; 4755 } 4756 break; 4757 case BPF_AND: 4758 case BPF_OR: 4759 case BPF_XOR: 4760 /* bitwise ops on pointers are troublesome, prohibit. */ 4761 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 4762 dst, bpf_alu_string[opcode >> 4]); 4763 return -EACCES; 4764 default: 4765 /* other operators (e.g.
MUL,LSH) produce non-pointer results */ 4766 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 4767 dst, bpf_alu_string[opcode >> 4]); 4768 return -EACCES; 4769 } 4770 4771 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 4772 return -EINVAL; 4773 4774 __update_reg_bounds(dst_reg); 4775 __reg_deduce_bounds(dst_reg); 4776 __reg_bound_offset(dst_reg); 4777 4778 /* For unprivileged we require that resulting offset must be in bounds 4779 * in order to be able to sanitize access later on. 4780 */ 4781 if (!env->allow_ptr_leaks) { 4782 if (dst_reg->type == PTR_TO_MAP_VALUE && 4783 check_map_access(env, dst, dst_reg->off, 1, false)) { 4784 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 4785 "prohibited for !root\n", dst); 4786 return -EACCES; 4787 } else if (dst_reg->type == PTR_TO_STACK && 4788 check_stack_access(env, dst_reg, dst_reg->off + 4789 dst_reg->var_off.value, 1)) { 4790 verbose(env, "R%d stack pointer arithmetic goes out of range, " 4791 "prohibited for !root\n", dst); 4792 return -EACCES; 4793 } 4794 } 4795 4796 return 0; 4797 } 4798 4799 /* WARNING: This function does calculations on 64-bit values, but the actual 4800 * execution may occur on 32-bit values. Therefore, things like bitshifts 4801 * need extra checks in the 32-bit case. 4802 */ 4803 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 4804 struct bpf_insn *insn, 4805 struct bpf_reg_state *dst_reg, 4806 struct bpf_reg_state src_reg) 4807 { 4808 struct bpf_reg_state *regs = cur_regs(env); 4809 u8 opcode = BPF_OP(insn->code); 4810 bool src_known, dst_known; 4811 s64 smin_val, smax_val; 4812 u64 umin_val, umax_val; 4813 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 4814 u32 dst = insn->dst_reg; 4815 int ret; 4816 4817 if (insn_bitness == 32) { 4818 /* Relevant for 32-bit RSH: Information can propagate towards 4819 * LSB, so it isn't sufficient to only truncate the output to 4820 * 32 bits. 4821 */ 4822 coerce_reg_to_size(dst_reg, 4); 4823 coerce_reg_to_size(&src_reg, 4); 4824 } 4825 4826 smin_val = src_reg.smin_value; 4827 smax_val = src_reg.smax_value; 4828 umin_val = src_reg.umin_value; 4829 umax_val = src_reg.umax_value; 4830 src_known = tnum_is_const(src_reg.var_off); 4831 dst_known = tnum_is_const(dst_reg->var_off); 4832 4833 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || 4834 smin_val > smax_val || umin_val > umax_val) { 4835 /* Taint dst register if offset had invalid bounds derived from 4836 * e.g. dead branches. 
4837 */ 4838 __mark_reg_unknown(env, dst_reg); 4839 return 0; 4840 } 4841 4842 if (!src_known && 4843 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 4844 __mark_reg_unknown(env, dst_reg); 4845 return 0; 4846 } 4847 4848 switch (opcode) { 4849 case BPF_ADD: 4850 ret = sanitize_val_alu(env, insn); 4851 if (ret < 0) { 4852 verbose(env, "R%d tried to add from different pointers or scalars\n", dst); 4853 return ret; 4854 } 4855 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 4856 signed_add_overflows(dst_reg->smax_value, smax_val)) { 4857 dst_reg->smin_value = S64_MIN; 4858 dst_reg->smax_value = S64_MAX; 4859 } else { 4860 dst_reg->smin_value += smin_val; 4861 dst_reg->smax_value += smax_val; 4862 } 4863 if (dst_reg->umin_value + umin_val < umin_val || 4864 dst_reg->umax_value + umax_val < umax_val) { 4865 dst_reg->umin_value = 0; 4866 dst_reg->umax_value = U64_MAX; 4867 } else { 4868 dst_reg->umin_value += umin_val; 4869 dst_reg->umax_value += umax_val; 4870 } 4871 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 4872 break; 4873 case BPF_SUB: 4874 ret = sanitize_val_alu(env, insn); 4875 if (ret < 0) { 4876 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); 4877 return ret; 4878 } 4879 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 4880 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 4881 /* Overflow possible, we know nothing */ 4882 dst_reg->smin_value = S64_MIN; 4883 dst_reg->smax_value = S64_MAX; 4884 } else { 4885 dst_reg->smin_value -= smax_val; 4886 dst_reg->smax_value -= smin_val; 4887 } 4888 if (dst_reg->umin_value < umax_val) { 4889 /* Overflow possible, we know nothing */ 4890 dst_reg->umin_value = 0; 4891 dst_reg->umax_value = U64_MAX; 4892 } else { 4893 /* Cannot overflow (as long as bounds are consistent) */ 4894 dst_reg->umin_value -= umax_val; 4895 dst_reg->umax_value -= umin_val; 4896 } 4897 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 4898 break; 4899 case BPF_MUL: 4900 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 4901 if (smin_val < 0 || dst_reg->smin_value < 0) { 4902 /* Ain't nobody got time to multiply that sign */ 4903 __mark_reg_unbounded(dst_reg); 4904 __update_reg_bounds(dst_reg); 4905 break; 4906 } 4907 /* Both values are positive, so we can work with unsigned and 4908 * copy the result to signed (unless it exceeds S64_MAX). 4909 */ 4910 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 4911 /* Potential overflow, we know nothing */ 4912 __mark_reg_unbounded(dst_reg); 4913 /* (except what we can learn from the var_off) */ 4914 __update_reg_bounds(dst_reg); 4915 break; 4916 } 4917 dst_reg->umin_value *= umin_val; 4918 dst_reg->umax_value *= umax_val; 4919 if (dst_reg->umax_value > S64_MAX) { 4920 /* Overflow possible, we know nothing */ 4921 dst_reg->smin_value = S64_MIN; 4922 dst_reg->smax_value = S64_MAX; 4923 } else { 4924 dst_reg->smin_value = dst_reg->umin_value; 4925 dst_reg->smax_value = dst_reg->umax_value; 4926 } 4927 break; 4928 case BPF_AND: 4929 if (src_known && dst_known) { 4930 __mark_reg_known(dst_reg, dst_reg->var_off.value & 4931 src_reg.var_off.value); 4932 break; 4933 } 4934 /* We get our minimum from the var_off, since that's inherently 4935 * bitwise. Our maximum is the minimum of the operands' maxima. 
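 * E.g. an unknown dst ANDed with constant 0xff: var_off becomes
 * (value=0, mask=0xff), so umin = 0 and umax = min(old umax, 0xff).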
4936 */ 4937 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 4938 dst_reg->umin_value = dst_reg->var_off.value; 4939 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 4940 if (dst_reg->smin_value < 0 || smin_val < 0) { 4941 /* Lose signed bounds when ANDing negative numbers, 4942 * ain't nobody got time for that. 4943 */ 4944 dst_reg->smin_value = S64_MIN; 4945 dst_reg->smax_value = S64_MAX; 4946 } else { 4947 /* ANDing two positives gives a positive, so safe to 4948 * cast result into s64. 4949 */ 4950 dst_reg->smin_value = dst_reg->umin_value; 4951 dst_reg->smax_value = dst_reg->umax_value; 4952 } 4953 /* We may learn something more from the var_off */ 4954 __update_reg_bounds(dst_reg); 4955 break; 4956 case BPF_OR: 4957 if (src_known && dst_known) { 4958 __mark_reg_known(dst_reg, dst_reg->var_off.value | 4959 src_reg.var_off.value); 4960 break; 4961 } 4962 /* We get our maximum from the var_off, and our minimum is the 4963 * maximum of the operands' minima 4964 */ 4965 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 4966 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 4967 dst_reg->umax_value = dst_reg->var_off.value | 4968 dst_reg->var_off.mask; 4969 if (dst_reg->smin_value < 0 || smin_val < 0) { 4970 /* Lose signed bounds when ORing negative numbers, 4971 * ain't nobody got time for that. 4972 */ 4973 dst_reg->smin_value = S64_MIN; 4974 dst_reg->smax_value = S64_MAX; 4975 } else { 4976 /* ORing two positives gives a positive, so safe to 4977 * cast result into s64. 4978 */ 4979 dst_reg->smin_value = dst_reg->umin_value; 4980 dst_reg->smax_value = dst_reg->umax_value; 4981 } 4982 /* We may learn something more from the var_off */ 4983 __update_reg_bounds(dst_reg); 4984 break; 4985 case BPF_LSH: 4986 if (umax_val >= insn_bitness) { 4987 /* Shifts greater than 31 or 63 are undefined. 4988 * This includes shifts by a negative number. 4989 */ 4990 mark_reg_unknown(env, regs, insn->dst_reg); 4991 break; 4992 } 4993 /* We lose all sign bit information (except what we can pick 4994 * up from var_off) 4995 */ 4996 dst_reg->smin_value = S64_MIN; 4997 dst_reg->smax_value = S64_MAX; 4998 /* If we might shift our top bit out, then we know nothing */ 4999 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 5000 dst_reg->umin_value = 0; 5001 dst_reg->umax_value = U64_MAX; 5002 } else { 5003 dst_reg->umin_value <<= umin_val; 5004 dst_reg->umax_value <<= umax_val; 5005 } 5006 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 5007 /* We may learn something more from the var_off */ 5008 __update_reg_bounds(dst_reg); 5009 break; 5010 case BPF_RSH: 5011 if (umax_val >= insn_bitness) { 5012 /* Shifts greater than 31 or 63 are undefined. 5013 * This includes shifts by a negative number. 5014 */ 5015 mark_reg_unknown(env, regs, insn->dst_reg); 5016 break; 5017 } 5018 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 5019 * be negative, then either: 5020 * 1) src_reg might be zero, so the sign bit of the result is 5021 * unknown, so we lose our signed bounds 5022 * 2) it's known negative, thus the unsigned bounds capture the 5023 * signed bounds 5024 * 3) the signed bounds cross zero, so they tell us nothing 5025 * about the result 5026 * If the value in dst_reg is known nonnegative, then again the 5027 * unsigned bounds capture the signed bounds. 5028 * Thus, in all cases it suffices to blow away our signed bounds 5029 * and rely on inferring new ones from the unsigned bounds and 5030 * var_off of the result.
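 * E.g. dst in unsigned [4, 8] shifted right by a value in [1, 2]:
 * umin becomes 4 >> 2 = 1 and umax becomes 8 >> 1 = 4.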
5031 */ 5032 dst_reg->smin_value = S64_MIN; 5033 dst_reg->smax_value = S64_MAX; 5034 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 5035 dst_reg->umin_value >>= umax_val; 5036 dst_reg->umax_value >>= umin_val; 5037 /* We may learn something more from the var_off */ 5038 __update_reg_bounds(dst_reg); 5039 break; 5040 case BPF_ARSH: 5041 if (umax_val >= insn_bitness) { 5042 /* Shifts greater than 31 or 63 are undefined. 5043 * This includes shifts by a negative number. 5044 */ 5045 mark_reg_unknown(env, regs, insn->dst_reg); 5046 break; 5047 } 5048 5049 /* Upon reaching here, src_known is true and 5050 * umax_val is equal to umin_val. 5051 */ 5052 if (insn_bitness == 32) { 5053 dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val); 5054 dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val); 5055 } else { 5056 dst_reg->smin_value >>= umin_val; 5057 dst_reg->smax_value >>= umin_val; 5058 } 5059 5060 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 5061 insn_bitness); 5062 5063 /* blow away the dst_reg umin_value/umax_value and rely on 5064 * dst_reg var_off to refine the result. 5065 */ 5066 dst_reg->umin_value = 0; 5067 dst_reg->umax_value = U64_MAX; 5068 __update_reg_bounds(dst_reg); 5069 break; 5070 default: 5071 mark_reg_unknown(env, regs, insn->dst_reg); 5072 break; 5073 } 5074 5075 if (BPF_CLASS(insn->code) != BPF_ALU64) { 5076 /* 32-bit ALU ops are (32,32)->32 */ 5077 coerce_reg_to_size(dst_reg, 4); 5078 } 5079 5080 __reg_deduce_bounds(dst_reg); 5081 __reg_bound_offset(dst_reg); 5082 return 0; 5083 } 5084 5085 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 5086 * and var_off. 5087 */ 5088 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 5089 struct bpf_insn *insn) 5090 { 5091 struct bpf_verifier_state *vstate = env->cur_state; 5092 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5093 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 5094 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 5095 u8 opcode = BPF_OP(insn->code); 5096 int err; 5097 5098 dst_reg = &regs[insn->dst_reg]; 5099 src_reg = NULL; 5100 if (dst_reg->type != SCALAR_VALUE) 5101 ptr_reg = dst_reg; 5102 if (BPF_SRC(insn->code) == BPF_X) { 5103 src_reg = &regs[insn->src_reg]; 5104 if (src_reg->type != SCALAR_VALUE) { 5105 if (dst_reg->type != SCALAR_VALUE) { 5106 /* Combining two pointers by any ALU op yields 5107 * an arbitrary scalar. Disallow all math except 5108 * pointer subtraction 5109 */ 5110 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 5111 mark_reg_unknown(env, regs, insn->dst_reg); 5112 return 0; 5113 } 5114 verbose(env, "R%d pointer %s pointer prohibited\n", 5115 insn->dst_reg, 5116 bpf_alu_string[opcode >> 4]); 5117 return -EACCES; 5118 } else { 5119 /* scalar += pointer 5120 * This is legal, but we have to reverse our 5121 * src/dest handling in computing the range 5122 */ 5123 err = mark_chain_precision(env, insn->dst_reg); 5124 if (err) 5125 return err; 5126 return adjust_ptr_min_max_vals(env, insn, 5127 src_reg, dst_reg); 5128 } 5129 } else if (ptr_reg) { 5130 /* pointer += scalar */ 5131 err = mark_chain_precision(env, insn->src_reg); 5132 if (err) 5133 return err; 5134 return adjust_ptr_min_max_vals(env, insn, 5135 dst_reg, src_reg); 5136 } 5137 } else { 5138 /* Pretend the src is a reg with a known value, since we only 5139 * need to be able to read from this state.
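 * E.g. 'r1 += 7' is handled exactly like 'r1 += rX' with rX a
 * synthetic register (off_reg) known to contain 7.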
5140 */ 5141 off_reg.type = SCALAR_VALUE; 5142 __mark_reg_known(&off_reg, insn->imm); 5143 src_reg = &off_reg; 5144 if (ptr_reg) /* pointer += K */ 5145 return adjust_ptr_min_max_vals(env, insn, 5146 ptr_reg, src_reg); 5147 } 5148 5149 /* Got here implies adding two SCALAR_VALUEs */ 5150 if (WARN_ON_ONCE(ptr_reg)) { 5151 print_verifier_state(env, state); 5152 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 5153 return -EINVAL; 5154 } 5155 if (WARN_ON(!src_reg)) { 5156 print_verifier_state(env, state); 5157 verbose(env, "verifier internal error: no src_reg\n"); 5158 return -EINVAL; 5159 } 5160 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 5161 } 5162 5163 /* check validity of 32-bit and 64-bit arithmetic operations */ 5164 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 5165 { 5166 struct bpf_reg_state *regs = cur_regs(env); 5167 u8 opcode = BPF_OP(insn->code); 5168 int err; 5169 5170 if (opcode == BPF_END || opcode == BPF_NEG) { 5171 if (opcode == BPF_NEG) { 5172 if (BPF_SRC(insn->code) != 0 || 5173 insn->src_reg != BPF_REG_0 || 5174 insn->off != 0 || insn->imm != 0) { 5175 verbose(env, "BPF_NEG uses reserved fields\n"); 5176 return -EINVAL; 5177 } 5178 } else { 5179 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 5180 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 5181 BPF_CLASS(insn->code) == BPF_ALU64) { 5182 verbose(env, "BPF_END uses reserved fields\n"); 5183 return -EINVAL; 5184 } 5185 } 5186 5187 /* check src operand */ 5188 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 5189 if (err) 5190 return err; 5191 5192 if (is_pointer_value(env, insn->dst_reg)) { 5193 verbose(env, "R%d pointer arithmetic prohibited\n", 5194 insn->dst_reg); 5195 return -EACCES; 5196 } 5197 5198 /* check dest operand */ 5199 err = check_reg_arg(env, insn->dst_reg, DST_OP); 5200 if (err) 5201 return err; 5202 5203 } else if (opcode == BPF_MOV) { 5204 5205 if (BPF_SRC(insn->code) == BPF_X) { 5206 if (insn->imm != 0 || insn->off != 0) { 5207 verbose(env, "BPF_MOV uses reserved fields\n"); 5208 return -EINVAL; 5209 } 5210 5211 /* check src operand */ 5212 err = check_reg_arg(env, insn->src_reg, SRC_OP); 5213 if (err) 5214 return err; 5215 } else { 5216 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 5217 verbose(env, "BPF_MOV uses reserved fields\n"); 5218 return -EINVAL; 5219 } 5220 } 5221 5222 /* check dest operand, mark as required later */ 5223 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 5224 if (err) 5225 return err; 5226 5227 if (BPF_SRC(insn->code) == BPF_X) { 5228 struct bpf_reg_state *src_reg = regs + insn->src_reg; 5229 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 5230 5231 if (BPF_CLASS(insn->code) == BPF_ALU64) { 5232 /* case: R1 = R2 5233 * copy register state to dest reg 5234 */ 5235 *dst_reg = *src_reg; 5236 dst_reg->live |= REG_LIVE_WRITTEN; 5237 dst_reg->subreg_def = DEF_NOT_SUBREG; 5238 } else { 5239 /* R1 = (u32) R2 */ 5240 if (is_pointer_value(env, insn->src_reg)) { 5241 verbose(env, 5242 "R%d partial copy of pointer\n", 5243 insn->src_reg); 5244 return -EACCES; 5245 } else if (src_reg->type == SCALAR_VALUE) { 5246 *dst_reg = *src_reg; 5247 dst_reg->live |= REG_LIVE_WRITTEN; 5248 dst_reg->subreg_def = env->insn_idx + 1; 5249 } else { 5250 mark_reg_unknown(env, regs, 5251 insn->dst_reg); 5252 } 5253 coerce_reg_to_size(dst_reg, 4); 5254 } 5255 } else { 5256 /* case: R = imm 5257 * remember the value we stored into this reg 5258 */ 5259 /* clear any state __mark_reg_known doesn't set */ 5260 
mark_reg_unknown(env, regs, insn->dst_reg); 5261 regs[insn->dst_reg].type = SCALAR_VALUE; 5262 if (BPF_CLASS(insn->code) == BPF_ALU64) { 5263 __mark_reg_known(regs + insn->dst_reg, 5264 insn->imm); 5265 } else { 5266 __mark_reg_known(regs + insn->dst_reg, 5267 (u32)insn->imm); 5268 } 5269 } 5270 5271 } else if (opcode > BPF_END) { 5272 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 5273 return -EINVAL; 5274 5275 } else { /* all other ALU ops: and, sub, xor, add, ... */ 5276 5277 if (BPF_SRC(insn->code) == BPF_X) { 5278 if (insn->imm != 0 || insn->off != 0) { 5279 verbose(env, "BPF_ALU uses reserved fields\n"); 5280 return -EINVAL; 5281 } 5282 /* check src1 operand */ 5283 err = check_reg_arg(env, insn->src_reg, SRC_OP); 5284 if (err) 5285 return err; 5286 } else { 5287 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 5288 verbose(env, "BPF_ALU uses reserved fields\n"); 5289 return -EINVAL; 5290 } 5291 } 5292 5293 /* check src2 operand */ 5294 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 5295 if (err) 5296 return err; 5297 5298 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 5299 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 5300 verbose(env, "div by zero\n"); 5301 return -EINVAL; 5302 } 5303 5304 if ((opcode == BPF_LSH || opcode == BPF_RSH || 5305 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 5306 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 5307 5308 if (insn->imm < 0 || insn->imm >= size) { 5309 verbose(env, "invalid shift %d\n", insn->imm); 5310 return -EINVAL; 5311 } 5312 } 5313 5314 /* check dest operand */ 5315 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 5316 if (err) 5317 return err; 5318 5319 return adjust_reg_min_max_vals(env, insn); 5320 } 5321 5322 return 0; 5323 } 5324 5325 static void __find_good_pkt_pointers(struct bpf_func_state *state, 5326 struct bpf_reg_state *dst_reg, 5327 enum bpf_reg_type type, u16 new_range) 5328 { 5329 struct bpf_reg_state *reg; 5330 int i; 5331 5332 for (i = 0; i < MAX_BPF_REG; i++) { 5333 reg = &state->regs[i]; 5334 if (reg->type == type && reg->id == dst_reg->id) 5335 /* keep the maximum range already checked */ 5336 reg->range = max(reg->range, new_range); 5337 } 5338 5339 bpf_for_each_spilled_reg(i, state, reg) { 5340 if (!reg) 5341 continue; 5342 if (reg->type == type && reg->id == dst_reg->id) 5343 reg->range = max(reg->range, new_range); 5344 } 5345 } 5346 5347 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 5348 struct bpf_reg_state *dst_reg, 5349 enum bpf_reg_type type, 5350 bool range_right_open) 5351 { 5352 u16 new_range; 5353 int i; 5354 5355 if (dst_reg->off < 0 || 5356 (dst_reg->off == 0 && range_right_open)) 5357 /* This doesn't give us any range */ 5358 return; 5359 5360 if (dst_reg->umax_value > MAX_PACKET_OFF || 5361 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 5362 /* Risk of overflow. For instance, ptr + (1<<63) may be less 5363 * than pkt_end, but that's because it's also less than pkt. 
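 * Hence both umax_value itself and umax_value + off must stay within
 * MAX_PACKET_OFF before the comparison can establish a usable range.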
5364 */ 5365 return; 5366 5367 new_range = dst_reg->off; 5368 if (range_right_open) 5369 new_range--; 5370 5371 /* Examples for register markings: 5372 * 5373 * pkt_data in dst register: 5374 * 5375 * r2 = r3; 5376 * r2 += 8; 5377 * if (r2 > pkt_end) goto <handle exception> 5378 * <access okay> 5379 * 5380 * r2 = r3; 5381 * r2 += 8; 5382 * if (r2 < pkt_end) goto <access okay> 5383 * <handle exception> 5384 * 5385 * Where: 5386 * r2 == dst_reg, pkt_end == src_reg 5387 * r2=pkt(id=n,off=8,r=0) 5388 * r3=pkt(id=n,off=0,r=0) 5389 * 5390 * pkt_data in src register: 5391 * 5392 * r2 = r3; 5393 * r2 += 8; 5394 * if (pkt_end >= r2) goto <access okay> 5395 * <handle exception> 5396 * 5397 * r2 = r3; 5398 * r2 += 8; 5399 * if (pkt_end <= r2) goto <handle exception> 5400 * <access okay> 5401 * 5402 * Where: 5403 * pkt_end == dst_reg, r2 == src_reg 5404 * r2=pkt(id=n,off=8,r=0) 5405 * r3=pkt(id=n,off=0,r=0) 5406 * 5407 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 5408 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 5409 * and [r3, r3 + 8-1) respectively is safe to access depending on 5410 * the check. 5411 */ 5412 5413 /* If our ids match, then we must have the same max_value. And we 5414 * don't care about the other reg's fixed offset, since if it's too big 5415 * the range won't allow anything. 5416 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 5417 */ 5418 for (i = 0; i <= vstate->curframe; i++) 5419 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, 5420 new_range); 5421 } 5422 5423 /* compute branch direction of the expression "if (reg opcode val) goto target;" 5424 * and return: 5425 * 1 - branch will be taken and "goto target" will be executed 5426 * 0 - branch will not be taken and fall-through to next insn 5427 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10] 5428 */ 5429 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 5430 bool is_jmp32) 5431 { 5432 struct bpf_reg_state reg_lo; 5433 s64 sval; 5434 5435 if (__is_pointer_value(false, reg)) 5436 return -1; 5437 5438 if (is_jmp32) { 5439 reg_lo = *reg; 5440 reg = &reg_lo; 5441 /* For JMP32, only low 32 bits are compared, coerce_reg_to_size 5442 * could truncate high bits and update umin/umax according to 5443 * information of low bits. 5444 */ 5445 coerce_reg_to_size(reg, 4); 5446 /* smin/smax need special handling. For example, after coerce, 5447 * if smin_value is 0x00000000ffffffffLL, the value is -1 when 5448 * used as operand to JMP32. It is a negative number from s32's 5449 * point of view, while it is a positive number when seen as 5450 * s64. The smin/smax are kept as s64, therefore, when used with 5451 * JMP32, they need to be transformed into s32, then sign 5452 * extended back to s64. 5453 * 5454 * Also, smin/smax were copied from umin/umax. If umin/umax have 5455 * different sign bits, the min/max relationship no longer 5456 * holds after casting into s32; in this case, set smin/smax 5457 * to the safest range.
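 * E.g. (illustrative) umin = 0xfffffffe, umax = 0x100000001: the low
 * 32 bits are -2 and 1 as s32, so no s32 ordering survives and
 * smin/smax are widened to [S32_MIN, S32_MAX] below.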
5458 */ 5459 if ((reg->umax_value ^ reg->umin_value) & 5460 (1ULL << 31)) { 5461 reg->smin_value = S32_MIN; 5462 reg->smax_value = S32_MAX; 5463 } 5464 reg->smin_value = (s64)(s32)reg->smin_value; 5465 reg->smax_value = (s64)(s32)reg->smax_value; 5466 5467 val = (u32)val; 5468 sval = (s64)(s32)val; 5469 } else { 5470 sval = (s64)val; 5471 } 5472 5473 switch (opcode) { 5474 case BPF_JEQ: 5475 if (tnum_is_const(reg->var_off)) 5476 return !!tnum_equals_const(reg->var_off, val); 5477 break; 5478 case BPF_JNE: 5479 if (tnum_is_const(reg->var_off)) 5480 return !tnum_equals_const(reg->var_off, val); 5481 break; 5482 case BPF_JSET: 5483 if ((~reg->var_off.mask & reg->var_off.value) & val) 5484 return 1; 5485 if (!((reg->var_off.mask | reg->var_off.value) & val)) 5486 return 0; 5487 break; 5488 case BPF_JGT: 5489 if (reg->umin_value > val) 5490 return 1; 5491 else if (reg->umax_value <= val) 5492 return 0; 5493 break; 5494 case BPF_JSGT: 5495 if (reg->smin_value > sval) 5496 return 1; 5497 else if (reg->smax_value < sval) 5498 return 0; 5499 break; 5500 case BPF_JLT: 5501 if (reg->umax_value < val) 5502 return 1; 5503 else if (reg->umin_value >= val) 5504 return 0; 5505 break; 5506 case BPF_JSLT: 5507 if (reg->smax_value < sval) 5508 return 1; 5509 else if (reg->smin_value >= sval) 5510 return 0; 5511 break; 5512 case BPF_JGE: 5513 if (reg->umin_value >= val) 5514 return 1; 5515 else if (reg->umax_value < val) 5516 return 0; 5517 break; 5518 case BPF_JSGE: 5519 if (reg->smin_value >= sval) 5520 return 1; 5521 else if (reg->smax_value < sval) 5522 return 0; 5523 break; 5524 case BPF_JLE: 5525 if (reg->umax_value <= val) 5526 return 1; 5527 else if (reg->umin_value > val) 5528 return 0; 5529 break; 5530 case BPF_JSLE: 5531 if (reg->smax_value <= sval) 5532 return 1; 5533 else if (reg->smin_value > sval) 5534 return 0; 5535 break; 5536 } 5537 5538 return -1; 5539 } 5540 5541 /* Generate min value of the high 32-bit from TNUM info. */ 5542 static u64 gen_hi_min(struct tnum var) 5543 { 5544 return var.value & ~0xffffffffULL; 5545 } 5546 5547 /* Generate max value of the high 32-bit from TNUM info. */ 5548 static u64 gen_hi_max(struct tnum var) 5549 { 5550 return (var.value | var.mask) & ~0xffffffffULL; 5551 } 5552 5553 /* Return true if VAL is compared with a s64 sign extended from s32, and they 5554 * are with the same signedness. 5555 */ 5556 static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg) 5557 { 5558 return ((s32)sval >= 0 && 5559 reg->smin_value >= 0 && reg->smax_value <= S32_MAX) || 5560 ((s32)sval < 0 && 5561 reg->smax_value <= 0 && reg->smin_value >= S32_MIN); 5562 } 5563 5564 /* Adjusts the register min/max values in the case that the dst_reg is the 5565 * variable register that we are working on, and src_reg is a constant or we're 5566 * simply doing a BPF_K check. 5567 * In JEQ/JNE cases we also adjust the var_off values. 5568 */ 5569 static void reg_set_min_max(struct bpf_reg_state *true_reg, 5570 struct bpf_reg_state *false_reg, u64 val, 5571 u8 opcode, bool is_jmp32) 5572 { 5573 s64 sval; 5574 5575 /* If the dst_reg is a pointer, we can't learn anything about its 5576 * variable offset from the compare (unless src_reg were a pointer into 5577 * the same object, but we don't bother with that. 5578 * Since false_reg and true_reg have the same type by construction, we 5579 * only need to check one of them for pointerness. 5580 */ 5581 if (__is_pointer_value(false, false_reg)) 5582 return; 5583 5584 val = is_jmp32 ? (u32)val : val; 5585 sval = is_jmp32 ? 
(s64)(s32)val : (s64)val; 5586 5587 switch (opcode) { 5588 case BPF_JEQ: 5589 case BPF_JNE: 5590 { 5591 struct bpf_reg_state *reg = 5592 opcode == BPF_JEQ ? true_reg : false_reg; 5593 5594 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but 5595 * if it is true we know the value for sure. Likewise for 5596 * BPF_JNE. 5597 */ 5598 if (is_jmp32) { 5599 u64 old_v = reg->var_off.value; 5600 u64 hi_mask = ~0xffffffffULL; 5601 5602 reg->var_off.value = (old_v & hi_mask) | val; 5603 reg->var_off.mask &= hi_mask; 5604 } else { 5605 __mark_reg_known(reg, val); 5606 } 5607 break; 5608 } 5609 case BPF_JSET: 5610 false_reg->var_off = tnum_and(false_reg->var_off, 5611 tnum_const(~val)); 5612 if (is_power_of_2(val)) 5613 true_reg->var_off = tnum_or(true_reg->var_off, 5614 tnum_const(val)); 5615 break; 5616 case BPF_JGE: 5617 case BPF_JGT: 5618 { 5619 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 5620 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 5621 5622 if (is_jmp32) { 5623 false_umax += gen_hi_max(false_reg->var_off); 5624 true_umin += gen_hi_min(true_reg->var_off); 5625 } 5626 false_reg->umax_value = min(false_reg->umax_value, false_umax); 5627 true_reg->umin_value = max(true_reg->umin_value, true_umin); 5628 break; 5629 } 5630 case BPF_JSGE: 5631 case BPF_JSGT: 5632 { 5633 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 5634 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 5635 5636 /* If the full s64 was not sign-extended from s32 then don't 5637 * deduct further info. 5638 */ 5639 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5640 break; 5641 false_reg->smax_value = min(false_reg->smax_value, false_smax); 5642 true_reg->smin_value = max(true_reg->smin_value, true_smin); 5643 break; 5644 } 5645 case BPF_JLE: 5646 case BPF_JLT: 5647 { 5648 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 5649 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 5650 5651 if (is_jmp32) { 5652 false_umin += gen_hi_min(false_reg->var_off); 5653 true_umax += gen_hi_max(true_reg->var_off); 5654 } 5655 false_reg->umin_value = max(false_reg->umin_value, false_umin); 5656 true_reg->umax_value = min(true_reg->umax_value, true_umax); 5657 break; 5658 } 5659 case BPF_JSLE: 5660 case BPF_JSLT: 5661 { 5662 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 5663 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; 5664 5665 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5666 break; 5667 false_reg->smin_value = max(false_reg->smin_value, false_smin); 5668 true_reg->smax_value = min(true_reg->smax_value, true_smax); 5669 break; 5670 } 5671 default: 5672 break; 5673 } 5674 5675 __reg_deduce_bounds(false_reg); 5676 __reg_deduce_bounds(true_reg); 5677 /* We might have learned some bits from the bounds. */ 5678 __reg_bound_offset(false_reg); 5679 __reg_bound_offset(true_reg); 5680 if (is_jmp32) { 5681 __reg_bound_offset32(false_reg); 5682 __reg_bound_offset32(true_reg); 5683 } 5684 /* Intersecting with the old var_off might have improved our bounds 5685 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 5686 * then new var_off is (0; 0x7f...fc) which improves our umax. 5687 */ 5688 __update_reg_bounds(false_reg); 5689 __update_reg_bounds(true_reg); 5690 } 5691 5692 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 5693 * the variable reg. 
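 * For example (a sketch): in "if (10 > r1) goto ..." the constant 10
 * sits in dst_reg and r1 in src_reg; the taken branch implies r1 < 10,
 * i.e. the bounds to learn are those of the mirrored test "if (r1 < 10)",
 * which is what the swapped min/max updates below implement.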
5694 */ 5695 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 5696 struct bpf_reg_state *false_reg, u64 val, 5697 u8 opcode, bool is_jmp32) 5698 { 5699 s64 sval; 5700 5701 if (__is_pointer_value(false, false_reg)) 5702 return; 5703 5704 val = is_jmp32 ? (u32)val : val; 5705 sval = is_jmp32 ? (s64)(s32)val : (s64)val; 5706 5707 switch (opcode) { 5708 case BPF_JEQ: 5709 case BPF_JNE: 5710 { 5711 struct bpf_reg_state *reg = 5712 opcode == BPF_JEQ ? true_reg : false_reg; 5713 5714 if (is_jmp32) { 5715 u64 old_v = reg->var_off.value; 5716 u64 hi_mask = ~0xffffffffULL; 5717 5718 reg->var_off.value = (old_v & hi_mask) | val; 5719 reg->var_off.mask &= hi_mask; 5720 } else { 5721 __mark_reg_known(reg, val); 5722 } 5723 break; 5724 } 5725 case BPF_JSET: 5726 false_reg->var_off = tnum_and(false_reg->var_off, 5727 tnum_const(~val)); 5728 if (is_power_of_2(val)) 5729 true_reg->var_off = tnum_or(true_reg->var_off, 5730 tnum_const(val)); 5731 break; 5732 case BPF_JGE: 5733 case BPF_JGT: 5734 { 5735 u64 false_umin = opcode == BPF_JGT ? val : val + 1; 5736 u64 true_umax = opcode == BPF_JGT ? val - 1 : val; 5737 5738 if (is_jmp32) { 5739 false_umin += gen_hi_min(false_reg->var_off); 5740 true_umax += gen_hi_max(true_reg->var_off); 5741 } 5742 false_reg->umin_value = max(false_reg->umin_value, false_umin); 5743 true_reg->umax_value = min(true_reg->umax_value, true_umax); 5744 break; 5745 } 5746 case BPF_JSGE: 5747 case BPF_JSGT: 5748 { 5749 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1; 5750 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval; 5751 5752 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5753 break; 5754 false_reg->smin_value = max(false_reg->smin_value, false_smin); 5755 true_reg->smax_value = min(true_reg->smax_value, true_smax); 5756 break; 5757 } 5758 case BPF_JLE: 5759 case BPF_JLT: 5760 { 5761 u64 false_umax = opcode == BPF_JLT ? val : val - 1; 5762 u64 true_umin = opcode == BPF_JLT ? val + 1 : val; 5763 5764 if (is_jmp32) { 5765 false_umax += gen_hi_max(false_reg->var_off); 5766 true_umin += gen_hi_min(true_reg->var_off); 5767 } 5768 false_reg->umax_value = min(false_reg->umax_value, false_umax); 5769 true_reg->umin_value = max(true_reg->umin_value, true_umin); 5770 break; 5771 } 5772 case BPF_JSLE: 5773 case BPF_JSLT: 5774 { 5775 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1; 5776 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval; 5777 5778 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) 5779 break; 5780 false_reg->smax_value = min(false_reg->smax_value, false_smax); 5781 true_reg->smin_value = max(true_reg->smin_value, true_smin); 5782 break; 5783 } 5784 default: 5785 break; 5786 } 5787 5788 __reg_deduce_bounds(false_reg); 5789 __reg_deduce_bounds(true_reg); 5790 /* We might have learned some bits from the bounds. */ 5791 __reg_bound_offset(false_reg); 5792 __reg_bound_offset(true_reg); 5793 if (is_jmp32) { 5794 __reg_bound_offset32(false_reg); 5795 __reg_bound_offset32(true_reg); 5796 } 5797 /* Intersecting with the old var_off might have improved our bounds 5798 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 5799 * then new var_off is (0; 0x7f...fc) which improves our umax. 
5800 */ 5801 __update_reg_bounds(false_reg); 5802 __update_reg_bounds(true_reg); 5803 } 5804 5805 /* Regs are known to be equal, so intersect their min/max/var_off */ 5806 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 5807 struct bpf_reg_state *dst_reg) 5808 { 5809 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 5810 dst_reg->umin_value); 5811 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 5812 dst_reg->umax_value); 5813 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 5814 dst_reg->smin_value); 5815 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 5816 dst_reg->smax_value); 5817 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 5818 dst_reg->var_off); 5819 /* We might have learned new bounds from the var_off. */ 5820 __update_reg_bounds(src_reg); 5821 __update_reg_bounds(dst_reg); 5822 /* We might have learned something about the sign bit. */ 5823 __reg_deduce_bounds(src_reg); 5824 __reg_deduce_bounds(dst_reg); 5825 /* We might have learned some bits from the bounds. */ 5826 __reg_bound_offset(src_reg); 5827 __reg_bound_offset(dst_reg); 5828 /* Intersecting with the old var_off might have improved our bounds 5829 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 5830 * then new var_off is (0; 0x7f...fc) which improves our umax. 5831 */ 5832 __update_reg_bounds(src_reg); 5833 __update_reg_bounds(dst_reg); 5834 } 5835 5836 static void reg_combine_min_max(struct bpf_reg_state *true_src, 5837 struct bpf_reg_state *true_dst, 5838 struct bpf_reg_state *false_src, 5839 struct bpf_reg_state *false_dst, 5840 u8 opcode) 5841 { 5842 switch (opcode) { 5843 case BPF_JEQ: 5844 __reg_combine_min_max(true_src, true_dst); 5845 break; 5846 case BPF_JNE: 5847 __reg_combine_min_max(false_src, false_dst); 5848 break; 5849 } 5850 } 5851 5852 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 5853 struct bpf_reg_state *reg, u32 id, 5854 bool is_null) 5855 { 5856 if (reg_type_may_be_null(reg->type) && reg->id == id) { 5857 /* Old offset (both fixed and variable parts) should 5858 * have been known-zero, because we don't allow pointer 5859 * arithmetic on pointers that might be NULL. 5860 */ 5861 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 5862 !tnum_equals_const(reg->var_off, 0) || 5863 reg->off)) { 5864 __mark_reg_known_zero(reg); 5865 reg->off = 0; 5866 } 5867 if (is_null) { 5868 reg->type = SCALAR_VALUE; 5869 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 5870 if (reg->map_ptr->inner_map_meta) { 5871 reg->type = CONST_PTR_TO_MAP; 5872 reg->map_ptr = reg->map_ptr->inner_map_meta; 5873 } else if (reg->map_ptr->map_type == 5874 BPF_MAP_TYPE_XSKMAP) { 5875 reg->type = PTR_TO_XDP_SOCK; 5876 } else { 5877 reg->type = PTR_TO_MAP_VALUE; 5878 } 5879 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { 5880 reg->type = PTR_TO_SOCKET; 5881 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { 5882 reg->type = PTR_TO_SOCK_COMMON; 5883 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { 5884 reg->type = PTR_TO_TCP_SOCK; 5885 } 5886 if (is_null) { 5887 /* We don't need id and ref_obj_id from this point 5888 * onwards anymore, thus we should better reset it, 5889 * so that state pruning has chances to take effect. 5890 */ 5891 reg->id = 0; 5892 reg->ref_obj_id = 0; 5893 } else if (!reg_may_point_to_spin_lock(reg)) { 5894 /* For not-NULL ptr, reg->ref_obj_id will be reset 5895 * in release_reg_references(). 5896 * 5897 * reg->id is still used by spin_lock ptr. 
Other 5898 * than spin_lock ptr type, reg->id can be reset. 5899 */ 5900 reg->id = 0; 5901 } 5902 } 5903 } 5904 5905 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 5906 bool is_null) 5907 { 5908 struct bpf_reg_state *reg; 5909 int i; 5910 5911 for (i = 0; i < MAX_BPF_REG; i++) 5912 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 5913 5914 bpf_for_each_spilled_reg(i, state, reg) { 5915 if (!reg) 5916 continue; 5917 mark_ptr_or_null_reg(state, reg, id, is_null); 5918 } 5919 } 5920 5921 /* The logic is similar to find_good_pkt_pointers(), both could eventually 5922 * be folded together at some point. 5923 */ 5924 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 5925 bool is_null) 5926 { 5927 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5928 struct bpf_reg_state *regs = state->regs; 5929 u32 ref_obj_id = regs[regno].ref_obj_id; 5930 u32 id = regs[regno].id; 5931 int i; 5932 5933 if (ref_obj_id && ref_obj_id == id && is_null) 5934 /* regs[regno] is in the " == NULL" branch. 5935 * No one could have freed the reference state before 5936 * doing the NULL check. 5937 */ 5938 WARN_ON_ONCE(release_reference_state(state, id)); 5939 5940 for (i = 0; i <= vstate->curframe; i++) 5941 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 5942 } 5943 5944 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 5945 struct bpf_reg_state *dst_reg, 5946 struct bpf_reg_state *src_reg, 5947 struct bpf_verifier_state *this_branch, 5948 struct bpf_verifier_state *other_branch) 5949 { 5950 if (BPF_SRC(insn->code) != BPF_X) 5951 return false; 5952 5953 /* Pointers are always 64-bit. */ 5954 if (BPF_CLASS(insn->code) == BPF_JMP32) 5955 return false; 5956 5957 switch (BPF_OP(insn->code)) { 5958 case BPF_JGT: 5959 if ((dst_reg->type == PTR_TO_PACKET && 5960 src_reg->type == PTR_TO_PACKET_END) || 5961 (dst_reg->type == PTR_TO_PACKET_META && 5962 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 5963 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 5964 find_good_pkt_pointers(this_branch, dst_reg, 5965 dst_reg->type, false); 5966 } else if ((dst_reg->type == PTR_TO_PACKET_END && 5967 src_reg->type == PTR_TO_PACKET) || 5968 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 5969 src_reg->type == PTR_TO_PACKET_META)) { 5970 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 5971 find_good_pkt_pointers(other_branch, src_reg, 5972 src_reg->type, true); 5973 } else { 5974 return false; 5975 } 5976 break; 5977 case BPF_JLT: 5978 if ((dst_reg->type == PTR_TO_PACKET && 5979 src_reg->type == PTR_TO_PACKET_END) || 5980 (dst_reg->type == PTR_TO_PACKET_META && 5981 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 5982 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 5983 find_good_pkt_pointers(other_branch, dst_reg, 5984 dst_reg->type, true); 5985 } else if ((dst_reg->type == PTR_TO_PACKET_END && 5986 src_reg->type == PTR_TO_PACKET) || 5987 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 5988 src_reg->type == PTR_TO_PACKET_META)) { 5989 /* pkt_end < pkt_data', pkt_data < pkt_meta' */ 5990 find_good_pkt_pointers(this_branch, src_reg, 5991 src_reg->type, false); 5992 } else { 5993 return false; 5994 } 5995 break; 5996 case BPF_JGE: 5997 if ((dst_reg->type == PTR_TO_PACKET && 5998 src_reg->type == PTR_TO_PACKET_END) || 5999 (dst_reg->type == PTR_TO_PACKET_META && 6000 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6001 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 6002 find_good_pkt_pointers(this_branch, dst_reg, 6003
dst_reg->type, true); 6004 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6005 src_reg->type == PTR_TO_PACKET) || 6006 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6007 src_reg->type == PTR_TO_PACKET_META)) { 6008 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 6009 find_good_pkt_pointers(other_branch, src_reg, 6010 src_reg->type, false); 6011 } else { 6012 return false; 6013 } 6014 break; 6015 case BPF_JLE: 6016 if ((dst_reg->type == PTR_TO_PACKET && 6017 src_reg->type == PTR_TO_PACKET_END) || 6018 (dst_reg->type == PTR_TO_PACKET_META && 6019 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 6020 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 6021 find_good_pkt_pointers(other_branch, dst_reg, 6022 dst_reg->type, false); 6023 } else if ((dst_reg->type == PTR_TO_PACKET_END && 6024 src_reg->type == PTR_TO_PACKET) || 6025 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 6026 src_reg->type == PTR_TO_PACKET_META)) { 6027 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 6028 find_good_pkt_pointers(this_branch, src_reg, 6029 src_reg->type, true); 6030 } else { 6031 return false; 6032 } 6033 break; 6034 default: 6035 return false; 6036 } 6037 6038 return true; 6039 } 6040 6041 static int check_cond_jmp_op(struct bpf_verifier_env *env, 6042 struct bpf_insn *insn, int *insn_idx) 6043 { 6044 struct bpf_verifier_state *this_branch = env->cur_state; 6045 struct bpf_verifier_state *other_branch; 6046 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 6047 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 6048 u8 opcode = BPF_OP(insn->code); 6049 bool is_jmp32; 6050 int pred = -1; 6051 int err; 6052 6053 /* Only conditional jumps are expected to reach here. */ 6054 if (opcode == BPF_JA || opcode > BPF_JSLE) { 6055 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 6056 return -EINVAL; 6057 } 6058 6059 if (BPF_SRC(insn->code) == BPF_X) { 6060 if (insn->imm != 0) { 6061 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 6062 return -EINVAL; 6063 } 6064 6065 /* check src1 operand */ 6066 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6067 if (err) 6068 return err; 6069 6070 if (is_pointer_value(env, insn->src_reg)) { 6071 verbose(env, "R%d pointer comparison prohibited\n", 6072 insn->src_reg); 6073 return -EACCES; 6074 } 6075 src_reg = &regs[insn->src_reg]; 6076 } else { 6077 if (insn->src_reg != BPF_REG_0) { 6078 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 6079 return -EINVAL; 6080 } 6081 } 6082 6083 /* check src2 operand */ 6084 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 6085 if (err) 6086 return err; 6087 6088 dst_reg = &regs[insn->dst_reg]; 6089 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 6090 6091 if (BPF_SRC(insn->code) == BPF_K) 6092 pred = is_branch_taken(dst_reg, insn->imm, 6093 opcode, is_jmp32); 6094 else if (src_reg->type == SCALAR_VALUE && 6095 tnum_is_const(src_reg->var_off)) 6096 pred = is_branch_taken(dst_reg, src_reg->var_off.value, 6097 opcode, is_jmp32); 6098 if (pred >= 0) { 6099 err = mark_chain_precision(env, insn->dst_reg); 6100 if (BPF_SRC(insn->code) == BPF_X && !err) 6101 err = mark_chain_precision(env, insn->src_reg); 6102 if (err) 6103 return err; 6104 } 6105 if (pred == 1) { 6106 /* only follow the goto, ignore fall-through */ 6107 *insn_idx += insn->off; 6108 return 0; 6109 } else if (pred == 0) { 6110 /* only follow fall-through branch, since 6111 * that's where the program will go 6112 */ 6113 return 0; 6114 } 6115 6116 other_branch = push_stack(env, *insn_idx + insn->off + 1,
*insn_idx, 6117 false); 6118 if (!other_branch) 6119 return -EFAULT; 6120 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 6121 6122 /* detect if we are comparing against a constant value so we can adjust 6123 * our min/max values for our dst register. 6124 * this is only legit if both are scalars (or pointers to the same 6125 * object, I suppose, but we don't support that right now), because 6126 * otherwise the different base pointers mean the offsets aren't 6127 * comparable. 6128 */ 6129 if (BPF_SRC(insn->code) == BPF_X) { 6130 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; 6131 struct bpf_reg_state lo_reg0 = *dst_reg; 6132 struct bpf_reg_state lo_reg1 = *src_reg; 6133 struct bpf_reg_state *src_lo, *dst_lo; 6134 6135 dst_lo = &lo_reg0; 6136 src_lo = &lo_reg1; 6137 coerce_reg_to_size(dst_lo, 4); 6138 coerce_reg_to_size(src_lo, 4); 6139 6140 if (dst_reg->type == SCALAR_VALUE && 6141 src_reg->type == SCALAR_VALUE) { 6142 if (tnum_is_const(src_reg->var_off) || 6143 (is_jmp32 && tnum_is_const(src_lo->var_off))) 6144 reg_set_min_max(&other_branch_regs[insn->dst_reg], 6145 dst_reg, 6146 is_jmp32 6147 ? src_lo->var_off.value 6148 : src_reg->var_off.value, 6149 opcode, is_jmp32); 6150 else if (tnum_is_const(dst_reg->var_off) || 6151 (is_jmp32 && tnum_is_const(dst_lo->var_off))) 6152 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 6153 src_reg, 6154 is_jmp32 6155 ? dst_lo->var_off.value 6156 : dst_reg->var_off.value, 6157 opcode, is_jmp32); 6158 else if (!is_jmp32 && 6159 (opcode == BPF_JEQ || opcode == BPF_JNE)) 6160 /* Comparing for equality, we can combine knowledge */ 6161 reg_combine_min_max(&other_branch_regs[insn->src_reg], 6162 &other_branch_regs[insn->dst_reg], 6163 src_reg, dst_reg, opcode); 6164 } 6165 } else if (dst_reg->type == SCALAR_VALUE) { 6166 reg_set_min_max(&other_branch_regs[insn->dst_reg], 6167 dst_reg, insn->imm, opcode, is_jmp32); 6168 } 6169 6170 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 6171 * NOTE: the optimizations below are related to pointer comparison, 6172 * which will never be JMP32. 6173 */ 6174 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 6175 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 6176 reg_type_may_be_null(dst_reg->type)) { 6177 /* Mark all identical registers in each branch as either 6178 * safe or unknown depending on the R == 0 or R != 0 condition.
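 * As a sketch (instruction sequence invented for illustration):
 *   r0 = bpf_map_lookup_elem(r1, r2) // r0: PTR_TO_MAP_VALUE_OR_NULL, id=N
 *   if (r0 == 0) goto +2             // the NULL check
 * In the fall-through branch every register sharing id N becomes
 * PTR_TO_MAP_VALUE, while at the branch target the same registers
 * become scalars known to be zero.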
6179 */ 6180 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 6181 opcode == BPF_JNE); 6182 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 6183 opcode == BPF_JEQ); 6184 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 6185 this_branch, other_branch) && 6186 is_pointer_value(env, insn->dst_reg)) { 6187 verbose(env, "R%d pointer comparison prohibited\n", 6188 insn->dst_reg); 6189 return -EACCES; 6190 } 6191 if (env->log.level & BPF_LOG_LEVEL) 6192 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 6193 return 0; 6194 } 6195 6196 /* verify BPF_LD_IMM64 instruction */ 6197 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 6198 { 6199 struct bpf_insn_aux_data *aux = cur_aux(env); 6200 struct bpf_reg_state *regs = cur_regs(env); 6201 struct bpf_map *map; 6202 int err; 6203 6204 if (BPF_SIZE(insn->code) != BPF_DW) { 6205 verbose(env, "invalid BPF_LD_IMM insn\n"); 6206 return -EINVAL; 6207 } 6208 if (insn->off != 0) { 6209 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 6210 return -EINVAL; 6211 } 6212 6213 err = check_reg_arg(env, insn->dst_reg, DST_OP); 6214 if (err) 6215 return err; 6216 6217 if (insn->src_reg == 0) { 6218 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 6219 6220 regs[insn->dst_reg].type = SCALAR_VALUE; 6221 __mark_reg_known(&regs[insn->dst_reg], imm); 6222 return 0; 6223 } 6224 6225 map = env->used_maps[aux->map_index]; 6226 mark_reg_known_zero(env, regs, insn->dst_reg); 6227 regs[insn->dst_reg].map_ptr = map; 6228 6229 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { 6230 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; 6231 regs[insn->dst_reg].off = aux->map_off; 6232 if (map_value_has_spin_lock(map)) 6233 regs[insn->dst_reg].id = ++env->id_gen; 6234 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 6235 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 6236 } else { 6237 verbose(env, "bpf verifier is misconfigured\n"); 6238 return -EINVAL; 6239 } 6240 6241 return 0; 6242 } 6243 6244 static bool may_access_skb(enum bpf_prog_type type) 6245 { 6246 switch (type) { 6247 case BPF_PROG_TYPE_SOCKET_FILTER: 6248 case BPF_PROG_TYPE_SCHED_CLS: 6249 case BPF_PROG_TYPE_SCHED_ACT: 6250 return true; 6251 default: 6252 return false; 6253 } 6254 } 6255 6256 /* verify safety of LD_ABS|LD_IND instructions: 6257 * - they can only appear in the programs where ctx == skb 6258 * - since they are wrappers of function calls, they scratch R1-R5 registers, 6259 * preserve R6-R9, and store return value into R0 6260 * 6261 * Implicit input: 6262 * ctx == skb == R6 == CTX 6263 * 6264 * Explicit input: 6265 * SRC == any register 6266 * IMM == 32-bit immediate 6267 * 6268 * Output: 6269 * R0 - 8/16/32-bit skb data converted to cpu endianness 6270 */ 6271 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 6272 { 6273 struct bpf_reg_state *regs = cur_regs(env); 6274 static const int ctx_reg = BPF_REG_6; 6275 u8 mode = BPF_MODE(insn->code); 6276 int i, err; 6277 6278 if (!may_access_skb(env->prog->type)) { 6279 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 6280 return -EINVAL; 6281 } 6282 6283 if (!env->ops->gen_ld_abs) { 6284 verbose(env, "bpf verifier is misconfigured\n"); 6285 return -EINVAL; 6286 } 6287 6288 if (env->subprog_cnt > 1) { 6289 /* when a program has an LD_ABS insn, JITs and the interpreter assume 6290 * that r1 == ctx == skb, which is not the case for callees 6291 * that can have arbitrary arguments.
It's problematic 6292 * for the main prog as well since JITs would need to analyze 6293 * all functions in order to make proper register save/restore 6294 * decisions in the main prog. Hence disallow LD_ABS with calls. 6295 */ 6296 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); 6297 return -EINVAL; 6298 } 6299 6300 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 6301 BPF_SIZE(insn->code) == BPF_DW || 6302 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 6303 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 6304 return -EINVAL; 6305 } 6306 6307 /* check whether implicit source operand (register R6) is readable */ 6308 err = check_reg_arg(env, ctx_reg, SRC_OP); 6309 if (err) 6310 return err; 6311 6312 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 6313 * gen_ld_abs() may terminate the program at runtime, leading to 6314 * a reference leak. 6315 */ 6316 err = check_reference_leak(env); 6317 if (err) { 6318 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 6319 return err; 6320 } 6321 6322 if (env->cur_state->active_spin_lock) { 6323 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 6324 return -EINVAL; 6325 } 6326 6327 if (regs[ctx_reg].type != PTR_TO_CTX) { 6328 verbose(env, 6329 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 6330 return -EINVAL; 6331 } 6332 6333 if (mode == BPF_IND) { 6334 /* check explicit source operand */ 6335 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6336 if (err) 6337 return err; 6338 } 6339 6340 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); 6341 if (err < 0) 6342 return err; 6343 6344 /* reset caller saved regs to unreadable */ 6345 for (i = 0; i < CALLER_SAVED_REGS; i++) { 6346 mark_reg_not_init(env, regs, caller_saved[i]); 6347 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 6348 } 6349 6350 /* mark destination R0 register as readable, since it contains 6351 * the value fetched from the packet. 6352 * Already marked as written above. 6353 */ 6354 mark_reg_unknown(env, regs, BPF_REG_0); 6355 /* ld_abs loads up to 32-bit skb data;
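 * e.g. (an illustrative use, not from the original comment):
 * BPF_LD_ABS(BPF_H, 12) fetches the 16-bit EtherType at skb byte
 * offset 12 into the low bits of R0, leaving only the 32-bit
 * subregister defined.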
*/ 6356 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 6357 return 0; 6358 } 6359 6360 static int check_return_code(struct bpf_verifier_env *env) 6361 { 6362 struct tnum enforce_attach_type_range = tnum_unknown; 6363 struct bpf_reg_state *reg; 6364 struct tnum range = tnum_range(0, 1); 6365 6366 switch (env->prog->type) { 6367 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 6368 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 6369 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) 6370 range = tnum_range(1, 1); 6371 break; 6372 case BPF_PROG_TYPE_CGROUP_SKB: 6373 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 6374 range = tnum_range(0, 3); 6375 enforce_attach_type_range = tnum_range(2, 3); 6376 } 6377 break; 6378 case BPF_PROG_TYPE_CGROUP_SOCK: 6379 case BPF_PROG_TYPE_SOCK_OPS: 6380 case BPF_PROG_TYPE_CGROUP_DEVICE: 6381 case BPF_PROG_TYPE_CGROUP_SYSCTL: 6382 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 6383 break; 6384 case BPF_PROG_TYPE_RAW_TRACEPOINT: 6385 if (!env->prog->aux->attach_btf_id) 6386 return 0; 6387 range = tnum_const(0); 6388 break; 6389 default: 6390 return 0; 6391 } 6392 6393 reg = cur_regs(env) + BPF_REG_0; 6394 if (reg->type != SCALAR_VALUE) { 6395 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 6396 reg_type_str[reg->type]); 6397 return -EINVAL; 6398 } 6399 6400 if (!tnum_in(range, reg->var_off)) { 6401 char tn_buf[48]; 6402 6403 verbose(env, "At program exit the register R0 "); 6404 if (!tnum_is_unknown(reg->var_off)) { 6405 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6406 verbose(env, "has value %s", tn_buf); 6407 } else { 6408 verbose(env, "has unknown scalar value"); 6409 } 6410 tnum_strn(tn_buf, sizeof(tn_buf), range); 6411 verbose(env, " should have been in %s\n", tn_buf); 6412 return -EINVAL; 6413 } 6414 6415 if (!tnum_is_unknown(enforce_attach_type_range) && 6416 tnum_in(enforce_attach_type_range, reg->var_off)) 6417 env->prog->enforce_expected_attach_type = 1; 6418 return 0; 6419 } 6420 6421 /* non-recursive DFS pseudo code 6422 * 1 procedure DFS-iterative(G,v): 6423 * 2 label v as discovered 6424 * 3 let S be a stack 6425 * 4 S.push(v) 6426 * 5 while S is not empty 6427 * 6 t <- S.pop() 6428 * 7 if t is what we're looking for: 6429 * 8 return t 6430 * 9 for all edges e in G.adjacentEdges(t) do 6431 * 10 if edge e is already labelled 6432 * 11 continue with the next edge 6433 * 12 w <- G.adjacentVertex(t,e) 6434 * 13 if vertex w is not discovered and not explored 6435 * 14 label e as tree-edge 6436 * 15 label w as discovered 6437 * 16 S.push(w) 6438 * 17 continue at 5 6439 * 18 else if vertex w is discovered 6440 * 19 label e as back-edge 6441 * 20 else 6442 * 21 // vertex w is explored 6443 * 22 label e as forward- or cross-edge 6444 * 23 label t as explored 6445 * 24 S.pop() 6446 * 6447 * convention: 6448 * 0x10 - discovered 6449 * 0x11 - discovered and fall-through edge labelled 6450 * 0x12 - discovered and fall-through and branch edges labelled 6451 * 0x20 - explored 6452 */ 6453 6454 enum { 6455 DISCOVERED = 0x10, 6456 EXPLORED = 0x20, 6457 FALLTHROUGH = 1, 6458 BRANCH = 2, 6459 }; 6460 6461 static u32 state_htab_size(struct bpf_verifier_env *env) 6462 { 6463 return env->prog->len; 6464 } 6465 6466 static struct bpf_verifier_state_list **explored_state( 6467 struct bpf_verifier_env *env, 6468 int idx) 6469 { 6470 struct bpf_verifier_state *cur = env->cur_state; 6471 struct bpf_func_state *state = cur->frame[cur->curframe]; 6472 6473 return &env->explored_states[(idx ^ state->callsite) % 
state_htab_size(env)]; 6474 } 6475 6476 static void init_explored_state(struct bpf_verifier_env *env, int idx) 6477 { 6478 env->insn_aux_data[idx].prune_point = true; 6479 } 6480 6481 /* t, w, e - match pseudo-code above: 6482 * t - index of current instruction 6483 * w - next instruction 6484 * e - edge 6485 */ 6486 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 6487 bool loop_ok) 6488 { 6489 int *insn_stack = env->cfg.insn_stack; 6490 int *insn_state = env->cfg.insn_state; 6491 6492 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 6493 return 0; 6494 6495 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 6496 return 0; 6497 6498 if (w < 0 || w >= env->prog->len) { 6499 verbose_linfo(env, t, "%d: ", t); 6500 verbose(env, "jump out of range from insn %d to %d\n", t, w); 6501 return -EINVAL; 6502 } 6503 6504 if (e == BRANCH) 6505 /* mark branch target for state pruning */ 6506 init_explored_state(env, w); 6507 6508 if (insn_state[w] == 0) { 6509 /* tree-edge */ 6510 insn_state[t] = DISCOVERED | e; 6511 insn_state[w] = DISCOVERED; 6512 if (env->cfg.cur_stack >= env->prog->len) 6513 return -E2BIG; 6514 insn_stack[env->cfg.cur_stack++] = w; 6515 return 1; 6516 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 6517 if (loop_ok && env->allow_ptr_leaks) 6518 return 0; 6519 verbose_linfo(env, t, "%d: ", t); 6520 verbose_linfo(env, w, "%d: ", w); 6521 verbose(env, "back-edge from insn %d to %d\n", t, w); 6522 return -EINVAL; 6523 } else if (insn_state[w] == EXPLORED) { 6524 /* forward- or cross-edge */ 6525 insn_state[t] = DISCOVERED | e; 6526 } else { 6527 verbose(env, "insn state internal bug\n"); 6528 return -EFAULT; 6529 } 6530 return 0; 6531 } 6532 6533 /* non-recursive depth-first-search to detect loops in BPF program 6534 * loop == back-edge in directed graph 6535 */ 6536 static int check_cfg(struct bpf_verifier_env *env) 6537 { 6538 struct bpf_insn *insns = env->prog->insnsi; 6539 int insn_cnt = env->prog->len; 6540 int *insn_stack, *insn_state; 6541 int ret = 0; 6542 int i, t; 6543 6544 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 6545 if (!insn_state) 6546 return -ENOMEM; 6547 6548 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 6549 if (!insn_stack) { 6550 kvfree(insn_state); 6551 return -ENOMEM; 6552 } 6553 6554 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 6555 insn_stack[0] = 0; /* 0 is the first instruction */ 6556 env->cfg.cur_stack = 1; 6557 6558 peek_stack: 6559 if (env->cfg.cur_stack == 0) 6560 goto check_state; 6561 t = insn_stack[env->cfg.cur_stack - 1]; 6562 6563 if (BPF_CLASS(insns[t].code) == BPF_JMP || 6564 BPF_CLASS(insns[t].code) == BPF_JMP32) { 6565 u8 opcode = BPF_OP(insns[t].code); 6566 6567 if (opcode == BPF_EXIT) { 6568 goto mark_explored; 6569 } else if (opcode == BPF_CALL) { 6570 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 6571 if (ret == 1) 6572 goto peek_stack; 6573 else if (ret < 0) 6574 goto err_free; 6575 if (t + 1 < insn_cnt) 6576 init_explored_state(env, t + 1); 6577 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 6578 init_explored_state(env, t); 6579 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, 6580 env, false); 6581 if (ret == 1) 6582 goto peek_stack; 6583 else if (ret < 0) 6584 goto err_free; 6585 } 6586 } else if (opcode == BPF_JA) { 6587 if (BPF_SRC(insns[t].code) != BPF_K) { 6588 ret = -EINVAL; 6589 goto err_free; 6590 } 6591 /* unconditional jump with single edge */ 6592 ret = push_insn(t, t + insns[t].off + 
1, 6593 FALLTHROUGH, env, true); 6594 if (ret == 1) 6595 goto peek_stack; 6596 else if (ret < 0) 6597 goto err_free; 6598 /* unconditional jmp is not a good pruning point, 6599 * but it's marked, since backtracking needs 6600 * to record jmp history in is_state_visited(). 6601 */ 6602 init_explored_state(env, t + insns[t].off + 1); 6603 /* tell verifier to check for equivalent states 6604 * after every call and jump 6605 */ 6606 if (t + 1 < insn_cnt) 6607 init_explored_state(env, t + 1); 6608 } else { 6609 /* conditional jump with two edges */ 6610 init_explored_state(env, t); 6611 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 6612 if (ret == 1) 6613 goto peek_stack; 6614 else if (ret < 0) 6615 goto err_free; 6616 6617 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 6618 if (ret == 1) 6619 goto peek_stack; 6620 else if (ret < 0) 6621 goto err_free; 6622 } 6623 } else { 6624 /* all other non-branch instructions with single 6625 * fall-through edge 6626 */ 6627 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 6628 if (ret == 1) 6629 goto peek_stack; 6630 else if (ret < 0) 6631 goto err_free; 6632 } 6633 6634 mark_explored: 6635 insn_state[t] = EXPLORED; 6636 if (env->cfg.cur_stack-- <= 0) { 6637 verbose(env, "pop stack internal bug\n"); 6638 ret = -EFAULT; 6639 goto err_free; 6640 } 6641 goto peek_stack; 6642 6643 check_state: 6644 for (i = 0; i < insn_cnt; i++) { 6645 if (insn_state[i] != EXPLORED) { 6646 verbose(env, "unreachable insn %d\n", i); 6647 ret = -EINVAL; 6648 goto err_free; 6649 } 6650 } 6651 ret = 0; /* cfg looks good */ 6652 6653 err_free: 6654 kvfree(insn_state); 6655 kvfree(insn_stack); 6656 env->cfg.insn_state = env->cfg.insn_stack = NULL; 6657 return ret; 6658 } 6659 6660 /* The minimum supported BTF func info size */ 6661 #define MIN_BPF_FUNCINFO_SIZE 8 6662 #define MAX_FUNCINFO_REC_SIZE 252 6663 6664 static int check_btf_func(struct bpf_verifier_env *env, 6665 const union bpf_attr *attr, 6666 union bpf_attr __user *uattr) 6667 { 6668 u32 i, nfuncs, urec_size, min_size; 6669 u32 krec_size = sizeof(struct bpf_func_info); 6670 struct bpf_func_info *krecord; 6671 struct bpf_func_info_aux *info_aux = NULL; 6672 const struct btf_type *type; 6673 struct bpf_prog *prog; 6674 const struct btf *btf; 6675 void __user *urecord; 6676 u32 prev_offset = 0; 6677 int ret = 0; 6678 6679 nfuncs = attr->func_info_cnt; 6680 if (!nfuncs) 6681 return 0; 6682 6683 if (nfuncs != env->subprog_cnt) { 6684 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 6685 return -EINVAL; 6686 } 6687 6688 urec_size = attr->func_info_rec_size; 6689 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 6690 urec_size > MAX_FUNCINFO_REC_SIZE || 6691 urec_size % sizeof(u32)) { 6692 verbose(env, "invalid func info rec size %u\n", urec_size); 6693 return -EINVAL; 6694 } 6695 6696 prog = env->prog; 6697 btf = prog->aux->btf; 6698 6699 urecord = u64_to_user_ptr(attr->func_info); 6700 min_size = min_t(u32, krec_size, urec_size); 6701 6702 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 6703 if (!krecord) 6704 return -ENOMEM; 6705 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 6706 if (!info_aux) 6707 goto err_free; 6708 6709 for (i = 0; i < nfuncs; i++) { 6710 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 6711 if (ret) { 6712 if (ret == -E2BIG) { 6713 verbose(env, "nonzero tailing record in func info"); 6714 /* set the size kernel expects so loader can zero 6715 * out the rest of the record. 
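 * For example (a sketch): if the kernel record is 8 bytes but
 * userspace passed func_info_rec_size == 16 with a nonzero tail,
 * the load fails with -E2BIG and min_size (8) is reported back so
 * the loader can zero the tail and retry.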
6716 */ 6717 if (put_user(min_size, &uattr->func_info_rec_size)) 6718 ret = -EFAULT; 6719 } 6720 goto err_free; 6721 } 6722 6723 if (copy_from_user(&krecord[i], urecord, min_size)) { 6724 ret = -EFAULT; 6725 goto err_free; 6726 } 6727 6728 /* check insn_off */ 6729 if (i == 0) { 6730 if (krecord[i].insn_off) { 6731 verbose(env, 6732 "nonzero insn_off %u for the first func info record", 6733 krecord[i].insn_off); 6734 ret = -EINVAL; 6735 goto err_free; 6736 } 6737 } else if (krecord[i].insn_off <= prev_offset) { 6738 verbose(env, 6739 "same or smaller insn offset (%u) than previous func info record (%u)", 6740 krecord[i].insn_off, prev_offset); 6741 ret = -EINVAL; 6742 goto err_free; 6743 } 6744 6745 if (env->subprog_info[i].start != krecord[i].insn_off) { 6746 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 6747 ret = -EINVAL; 6748 goto err_free; 6749 } 6750 6751 /* check type_id */ 6752 type = btf_type_by_id(btf, krecord[i].type_id); 6753 if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) { 6754 verbose(env, "invalid type id %d in func info", 6755 krecord[i].type_id); 6756 ret = -EINVAL; 6757 goto err_free; 6758 } 6759 prev_offset = krecord[i].insn_off; 6760 urecord += urec_size; 6761 } 6762 6763 prog->aux->func_info = krecord; 6764 prog->aux->func_info_cnt = nfuncs; 6765 prog->aux->func_info_aux = info_aux; 6766 return 0; 6767 6768 err_free: 6769 kvfree(krecord); 6770 kfree(info_aux); 6771 return ret; 6772 } 6773 6774 static void adjust_btf_func(struct bpf_verifier_env *env) 6775 { 6776 struct bpf_prog_aux *aux = env->prog->aux; 6777 int i; 6778 6779 if (!aux->func_info) 6780 return; 6781 6782 for (i = 0; i < env->subprog_cnt; i++) 6783 aux->func_info[i].insn_off = env->subprog_info[i].start; 6784 } 6785 6786 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ 6787 sizeof(((struct bpf_line_info *)(0))->line_col)) 6788 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 6789 6790 static int check_btf_line(struct bpf_verifier_env *env, 6791 const union bpf_attr *attr, 6792 union bpf_attr __user *uattr) 6793 { 6794 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 6795 struct bpf_subprog_info *sub; 6796 struct bpf_line_info *linfo; 6797 struct bpf_prog *prog; 6798 const struct btf *btf; 6799 void __user *ulinfo; 6800 int err; 6801 6802 nr_linfo = attr->line_info_cnt; 6803 if (!nr_linfo) 6804 return 0; 6805 6806 rec_size = attr->line_info_rec_size; 6807 if (rec_size < MIN_BPF_LINEINFO_SIZE || 6808 rec_size > MAX_LINEINFO_REC_SIZE || 6809 rec_size & (sizeof(u32) - 1)) 6810 return -EINVAL; 6811 6812 /* Need to zero it in case the userspace may 6813 * pass in a smaller bpf_line_info object. 
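 * (e.g., a sketch: if rec_size is smaller than the kernel's
 * struct bpf_line_info, only rec_size bytes per record are copied
 * below and the kvcalloc() zero-fill supplies the remaining fields.)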
6814 */ 6815 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 6816 GFP_KERNEL | __GFP_NOWARN); 6817 if (!linfo) 6818 return -ENOMEM; 6819 6820 prog = env->prog; 6821 btf = prog->aux->btf; 6822 6823 s = 0; 6824 sub = env->subprog_info; 6825 ulinfo = u64_to_user_ptr(attr->line_info); 6826 expected_size = sizeof(struct bpf_line_info); 6827 ncopy = min_t(u32, expected_size, rec_size); 6828 for (i = 0; i < nr_linfo; i++) { 6829 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 6830 if (err) { 6831 if (err == -E2BIG) { 6832 verbose(env, "nonzero tailing record in line_info"); 6833 if (put_user(expected_size, 6834 &uattr->line_info_rec_size)) 6835 err = -EFAULT; 6836 } 6837 goto err_free; 6838 } 6839 6840 if (copy_from_user(&linfo[i], ulinfo, ncopy)) { 6841 err = -EFAULT; 6842 goto err_free; 6843 } 6844 6845 /* 6846 * Check insn_off to ensure 6847 * 1) strictly increasing AND 6848 * 2) bounded by prog->len 6849 * 6850 * The linfo[0].insn_off == 0 check logically falls into 6851 * the later "missing bpf_line_info for func..." case 6852 * because the first linfo[0].insn_off must be the 6853 * first sub also and the first sub must have 6854 * subprog_info[0].start == 0. 6855 */ 6856 if ((i && linfo[i].insn_off <= prev_offset) || 6857 linfo[i].insn_off >= prog->len) { 6858 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 6859 i, linfo[i].insn_off, prev_offset, 6860 prog->len); 6861 err = -EINVAL; 6862 goto err_free; 6863 } 6864 6865 if (!prog->insnsi[linfo[i].insn_off].code) { 6866 verbose(env, 6867 "Invalid insn code at line_info[%u].insn_off\n", 6868 i); 6869 err = -EINVAL; 6870 goto err_free; 6871 } 6872 6873 if (!btf_name_by_offset(btf, linfo[i].line_off) || 6874 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 6875 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 6876 err = -EINVAL; 6877 goto err_free; 6878 } 6879 6880 if (s != env->subprog_cnt) { 6881 if (linfo[i].insn_off == sub[s].start) { 6882 sub[s].linfo_idx = i; 6883 s++; 6884 } else if (sub[s].start < linfo[i].insn_off) { 6885 verbose(env, "missing bpf_line_info for func#%u\n", s); 6886 err = -EINVAL; 6887 goto err_free; 6888 } 6889 } 6890 6891 prev_offset = linfo[i].insn_off; 6892 ulinfo += rec_size; 6893 } 6894 6895 if (s != env->subprog_cnt) { 6896 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 6897 env->subprog_cnt - s, s); 6898 err = -EINVAL; 6899 goto err_free; 6900 } 6901 6902 prog->aux->linfo = linfo; 6903 prog->aux->nr_linfo = nr_linfo; 6904 6905 return 0; 6906 6907 err_free: 6908 kvfree(linfo); 6909 return err; 6910 } 6911 6912 static int check_btf_info(struct bpf_verifier_env *env, 6913 const union bpf_attr *attr, 6914 union bpf_attr __user *uattr) 6915 { 6916 struct btf *btf; 6917 int err; 6918 6919 if (!attr->func_info_cnt && !attr->line_info_cnt) 6920 return 0; 6921 6922 btf = btf_get_by_fd(attr->prog_btf_fd); 6923 if (IS_ERR(btf)) 6924 return PTR_ERR(btf); 6925 env->prog->aux->btf = btf; 6926 6927 err = check_btf_func(env, attr, uattr); 6928 if (err) 6929 return err; 6930 6931 err = check_btf_line(env, attr, uattr); 6932 if (err) 6933 return err; 6934 6935 return 0; 6936 } 6937 6938 /* check %cur's range satisfies %old's */ 6939 static bool range_within(struct bpf_reg_state *old, 6940 struct bpf_reg_state *cur) 6941 { 6942 return old->umin_value <= cur->umin_value && 6943 old->umax_value >= cur->umax_value && 6944 old->smin_value <= cur->smin_value && 6945 old->smax_value >= cur->smax_value; 6946 } 6947 6948 /* 
Maximum number of register states that can exist at once */ 6949 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 6950 struct idpair { 6951 u32 old; 6952 u32 cur; 6953 }; 6954 6955 /* If in the old state two registers had the same id, then they need to have 6956 * the same id in the new state as well. But that id could be different from 6957 * the old state, so we need to track the mapping from old to new ids. 6958 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 6959 * regs with old id 5 must also have new id 9 for the new state to be safe. But 6960 * regs with a different old id could still have new id 9; we don't care about 6961 * that. 6962 * So we look through our idmap to see if this old id has been seen before. If 6963 * so, we require the new id to match; otherwise, we add the id pair to the map. 6964 */ 6965 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 6966 { 6967 unsigned int i; 6968 6969 for (i = 0; i < ID_MAP_SIZE; i++) { 6970 if (!idmap[i].old) { 6971 /* Reached an empty slot; haven't seen this id before */ 6972 idmap[i].old = old_id; 6973 idmap[i].cur = cur_id; 6974 return true; 6975 } 6976 if (idmap[i].old == old_id) 6977 return idmap[i].cur == cur_id; 6978 } 6979 /* We ran out of idmap slots, which should be impossible */ 6980 WARN_ON_ONCE(1); 6981 return false; 6982 } 6983 6984 static void clean_func_state(struct bpf_verifier_env *env, 6985 struct bpf_func_state *st) 6986 { 6987 enum bpf_reg_liveness live; 6988 int i, j; 6989 6990 for (i = 0; i < BPF_REG_FP; i++) { 6991 live = st->regs[i].live; 6992 /* liveness must not touch this register anymore */ 6993 st->regs[i].live |= REG_LIVE_DONE; 6994 if (!(live & REG_LIVE_READ)) 6995 /* since the register is unused, clear its state 6996 * to make further comparison simpler 6997 */ 6998 __mark_reg_not_init(env, &st->regs[i]); 6999 } 7000 7001 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 7002 live = st->stack[i].spilled_ptr.live; 7003 /* liveness must not touch this stack slot anymore */ 7004 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 7005 if (!(live & REG_LIVE_READ)) { 7006 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 7007 for (j = 0; j < BPF_REG_SIZE; j++) 7008 st->stack[i].slot_type[j] = STACK_INVALID; 7009 } 7010 } 7011 } 7012 7013 static void clean_verifier_state(struct bpf_verifier_env *env, 7014 struct bpf_verifier_state *st) 7015 { 7016 int i; 7017 7018 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 7019 /* all regs in this state in all frames were already marked */ 7020 return; 7021 7022 for (i = 0; i <= st->curframe; i++) 7023 clean_func_state(env, st->frame[i]); 7024 } 7025 7026 /* the parentage chains form a tree. 7027 * the verifier states are added to state lists at given insn and 7028 * pushed into state stack for future exploration. 7029 * when the verifier reaches bpf_exit insn some of the verifier states 7030 * stored in the state lists have their final liveness state already, 7031 * but a lot of states will get revised from liveness point of view when 7032 * the verifier explores other branches. 7033 * Example: 7034 * 1: r0 = 1 7035 * 2: if r1 == 100 goto pc+1 7036 * 3: r0 = 2 7037 * 4: exit 7038 * when the verifier reaches exit insn the register r0 in the state list of 7039 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 7040 * of insn 2 and goes exploring further. At the insn 4 it will walk the 7041 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 7042 * 7043 * Since the verifier pushes the branch states as it sees them while exploring 7044 * the program, walking the branch instruction for the second 7045 * time means that all states below this branch were already explored and 7046 * their final liveness marks are already propagated. 7047 * Hence when the verifier completes the search of the state list in is_state_visited() 7048 * we can call this clean_live_states() function to mark all liveness states 7049 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 7050 * will not be used. 7051 * This function also clears the registers and stack for states that were not READ 7052 * to simplify state merging. 7053 * 7054 * An important note: walking the same branch instruction in the callee 7055 * doesn't mean that the states are DONE. The verifier has to compare 7056 * the callsites. 7057 */ 7058 static void clean_live_states(struct bpf_verifier_env *env, int insn, 7059 struct bpf_verifier_state *cur) 7060 { 7061 struct bpf_verifier_state_list *sl; 7062 int i; 7063 7064 sl = *explored_state(env, insn); 7065 while (sl) { 7066 if (sl->state.branches) 7067 goto next; 7068 if (sl->state.insn_idx != insn || 7069 sl->state.curframe != cur->curframe) 7070 goto next; 7071 for (i = 0; i <= cur->curframe; i++) 7072 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 7073 goto next; 7074 clean_verifier_state(env, &sl->state); 7075 next: 7076 sl = sl->next; 7077 } 7078 } 7079 7080 /* Returns true if (rold safe implies rcur safe) */ 7081 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 7082 struct idpair *idmap) 7083 { 7084 bool equal; 7085 7086 if (!(rold->live & REG_LIVE_READ)) 7087 /* explored state didn't use this */ 7088 return true; 7089 7090 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; 7091 7092 if (rold->type == PTR_TO_STACK) 7093 /* two stack pointers are equal only if they're pointing to 7094 * the same stack frame, since fp-8 in foo != fp-8 in bar 7095 */ 7096 return equal && rold->frameno == rcur->frameno; 7097 7098 if (equal) 7099 return true; 7100 7101 if (rold->type == NOT_INIT) 7102 /* explored state can't have used this */ 7103 return true; 7104 if (rcur->type == NOT_INIT) 7105 return false; 7106 switch (rold->type) { 7107 case SCALAR_VALUE: 7108 if (rcur->type == SCALAR_VALUE) { 7109 if (!rold->precise && !rcur->precise) 7110 return true; 7111 /* new val must satisfy old val knowledge */ 7112 return range_within(rold, rcur) && 7113 tnum_in(rold->var_off, rcur->var_off); 7114 } else { 7115 /* We're trying to use a pointer in place of a scalar. 7116 * Even if the scalar was unbounded, this could lead to 7117 * pointer leaks because scalars are allowed to leak 7118 * while pointers are not. We could make this safe in 7119 * special cases if root is calling us, but it's 7120 * probably not worth the hassle. 7121 */ 7122 return false; 7123 } 7124 case PTR_TO_MAP_VALUE: 7125 /* If the new min/max/var_off satisfy the old ones and 7126 * everything else matches, we are OK.
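 * (For instance, a sketch: an old PTR_TO_MAP_VALUE whose bounds span
 * [0, 16] safely subsumes a current one with bounds [4, 8] into the
 * same map, since every access proven safe under the wider old bounds
 * is safe under the narrower new ones.)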
7127 * 'id' is not compared, since it's only used for maps with 7128 * bpf_spin_lock inside map element and in such cases if 7129 * the rest of the prog is valid for one map element then 7130 * it's valid for all map elements regardless of the key 7131 * used in bpf_map_lookup() 7132 */ 7133 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 7134 range_within(rold, rcur) && 7135 tnum_in(rold->var_off, rcur->var_off); 7136 case PTR_TO_MAP_VALUE_OR_NULL: 7137 /* a PTR_TO_MAP_VALUE could be safe to use as a 7138 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 7139 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 7140 * checked, doing so could have affected others with the same 7141 * id, and we can't check for that because we lost the id when 7142 * we converted to a PTR_TO_MAP_VALUE. 7143 */ 7144 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 7145 return false; 7146 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 7147 return false; 7148 /* Check our ids match any regs they're supposed to */ 7149 return check_ids(rold->id, rcur->id, idmap); 7150 case PTR_TO_PACKET_META: 7151 case PTR_TO_PACKET: 7152 if (rcur->type != rold->type) 7153 return false; 7154 /* We must have at least as much range as the old ptr 7155 * did, so that any accesses which were safe before are 7156 * still safe. This is true even if old range < old off, 7157 * since someone could have accessed through (ptr - k), or 7158 * even done ptr -= k in a register, to get a safe access. 7159 */ 7160 if (rold->range > rcur->range) 7161 return false; 7162 /* If the offsets don't match, we can't trust our alignment; 7163 * nor can we be sure that we won't fall out of range. 7164 */ 7165 if (rold->off != rcur->off) 7166 return false; 7167 /* id relations must be preserved */ 7168 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 7169 return false; 7170 /* new val must satisfy old val knowledge */ 7171 return range_within(rold, rcur) && 7172 tnum_in(rold->var_off, rcur->var_off); 7173 case PTR_TO_CTX: 7174 case CONST_PTR_TO_MAP: 7175 case PTR_TO_PACKET_END: 7176 case PTR_TO_FLOW_KEYS: 7177 case PTR_TO_SOCKET: 7178 case PTR_TO_SOCKET_OR_NULL: 7179 case PTR_TO_SOCK_COMMON: 7180 case PTR_TO_SOCK_COMMON_OR_NULL: 7181 case PTR_TO_TCP_SOCK: 7182 case PTR_TO_TCP_SOCK_OR_NULL: 7183 case PTR_TO_XDP_SOCK: 7184 /* Only valid matches are exact, which memcmp() above 7185 * would have accepted 7186 */ 7187 default: 7188 /* Don't know what's going on, just say it's not safe */ 7189 return false; 7190 } 7191 7192 /* Shouldn't get here; if we do, say it's not safe */ 7193 WARN_ON_ONCE(1); 7194 return false; 7195 } 7196 7197 static bool stacksafe(struct bpf_func_state *old, 7198 struct bpf_func_state *cur, 7199 struct idpair *idmap) 7200 { 7201 int i, spi; 7202 7203 /* walk slots of the explored stack and ignore any additional 7204 * slots in the current stack, since explored(safe) state 7205 * didn't use them 7206 */ 7207 for (i = 0; i < old->allocated_stack; i++) { 7208 spi = i / BPF_REG_SIZE; 7209 7210 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 7211 i += BPF_REG_SIZE - 1; 7212 /* explored state didn't use this */ 7213 continue; 7214 } 7215 7216 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 7217 continue; 7218 7219 /* explored stack has more populated slots than current stack 7220 * and these slots were used 7221 */ 7222 if (i >= cur->allocated_stack) 7223 return false; 7224 7225 /* if old state was safe with misc data in the stack 7226 * it will be safe with 
zero-initialized stack. 7227 * The opposite is not true 7228 */ 7229 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 7230 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 7231 continue; 7232 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 7233 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 7234 /* Ex: old explored (safe) state has STACK_SPILL in 7235 * this stack slot, but current has has STACK_MISC -> 7236 * this verifier states are not equivalent, 7237 * return false to continue verification of this path 7238 */ 7239 return false; 7240 if (i % BPF_REG_SIZE) 7241 continue; 7242 if (old->stack[spi].slot_type[0] != STACK_SPILL) 7243 continue; 7244 if (!regsafe(&old->stack[spi].spilled_ptr, 7245 &cur->stack[spi].spilled_ptr, 7246 idmap)) 7247 /* when explored and current stack slot are both storing 7248 * spilled registers, check that stored pointers types 7249 * are the same as well. 7250 * Ex: explored safe path could have stored 7251 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 7252 * but current path has stored: 7253 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 7254 * such verifier states are not equivalent. 7255 * return false to continue verification of this path 7256 */ 7257 return false; 7258 } 7259 return true; 7260 } 7261 7262 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) 7263 { 7264 if (old->acquired_refs != cur->acquired_refs) 7265 return false; 7266 return !memcmp(old->refs, cur->refs, 7267 sizeof(*old->refs) * old->acquired_refs); 7268 } 7269 7270 /* compare two verifier states 7271 * 7272 * all states stored in state_list are known to be valid, since 7273 * verifier reached 'bpf_exit' instruction through them 7274 * 7275 * this function is called when verifier exploring different branches of 7276 * execution popped from the state stack. If it sees an old state that has 7277 * more strict register state and more strict stack state then this execution 7278 * branch doesn't need to be explored further, since verifier already 7279 * concluded that more strict state leads to valid finish. 7280 * 7281 * Therefore two states are equivalent if register state is more conservative 7282 * and explored stack state is more conservative than the current one. 7283 * Example: 7284 * explored current 7285 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 7286 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 7287 * 7288 * In other words if current stack state (one being explored) has more 7289 * valid slots than old one that already passed validation, it means 7290 * the verifier can stop exploring and conclude that current state is valid too 7291 * 7292 * Similarly with registers. 
If the explored state marks a register type as invalid
7293 * whereas the register type in the current state is meaningful, it means
7294 * that the current state will reach the 'bpf_exit' instruction safely
7295 */
7296 static bool func_states_equal(struct bpf_func_state *old,
7297 struct bpf_func_state *cur)
7298 {
7299 struct idpair *idmap;
7300 bool ret = false;
7301 int i;
7302 
7303 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7304 /* If we failed to allocate the idmap, just say it's not safe */
7305 if (!idmap)
7306 return false;
7307 
7308 for (i = 0; i < MAX_BPF_REG; i++) {
7309 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
7310 goto out_free;
7311 }
7312 
7313 if (!stacksafe(old, cur, idmap))
7314 goto out_free;
7315 
7316 if (!refsafe(old, cur))
7317 goto out_free;
7318 ret = true;
7319 out_free:
7320 kfree(idmap);
7321 return ret;
7322 }
7323 
7324 static bool states_equal(struct bpf_verifier_env *env,
7325 struct bpf_verifier_state *old,
7326 struct bpf_verifier_state *cur)
7327 {
7328 int i;
7329 
7330 if (old->curframe != cur->curframe)
7331 return false;
7332 
7333 /* Verification state from speculative execution simulation
7334 * must never prune a non-speculative execution one.
7335 */
7336 if (old->speculative && !cur->speculative)
7337 return false;
7338 
7339 if (old->active_spin_lock != cur->active_spin_lock)
7340 return false;
7341 
7342 /* for states to be equal callsites have to be the same
7343 * and all frame states need to be equivalent
7344 */
7345 for (i = 0; i <= old->curframe; i++) {
7346 if (old->frame[i]->callsite != cur->frame[i]->callsite)
7347 return false;
7348 if (!func_states_equal(old->frame[i], cur->frame[i]))
7349 return false;
7350 }
7351 return true;
7352 }
7353 
7354 /* Return 0 if no propagation happened. Return a negative error code if an
7355 * error happened. Otherwise, return the propagated bit.
7356 */
7357 static int propagate_liveness_reg(struct bpf_verifier_env *env,
7358 struct bpf_reg_state *reg,
7359 struct bpf_reg_state *parent_reg)
7360 {
7361 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7362 u8 flag = reg->live & REG_LIVE_READ;
7363 int err;
7364 
7365 /* When we come here, the read flags of PARENT_REG or REG could be any of
7366 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7367 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
7368 */
7369 if (parent_flag == REG_LIVE_READ64 ||
7370 /* Or if there is no read flag from REG. */
7371 !flag ||
7372 /* Or if the read flag from REG is the same as PARENT_REG. */
7373 parent_flag == flag)
7374 return 0;
7375 
7376 err = mark_reg_read(env, reg, parent_reg, flag);
7377 if (err)
7378 return err;
7379 
7380 return flag;
7381 }
7382 
7383 /* A write screens off any subsequent reads; but write marks come from the
7384 * straight-line code between a state and its parent. When we arrive at an
7385 * equivalent state (jump target or such) we didn't arrive by the straight-line
7386 * code, so read marks in the state must propagate to the parent regardless
7387 * of the state's write marks. That's what the 'parent == state->parent'
7388 * comparison in mark_reg_read() is for.
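*
* For illustration (a hypothetical trace, not taken from a real prog):
* suppose the straight-line code between the parent and the current
* state wrote r6 (REG_LIVE_WRITTEN), while the equivalent explored
* state carries a read mark on r6 from its own continuation. Arriving
* at the equivalent state via a jump bypasses that write, so the r6
* read mark must still be propagated up to the parent, or a later
* pruning decision could wrongly treat r6 as dead there.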
7389 */ 7390 static int propagate_liveness(struct bpf_verifier_env *env, 7391 const struct bpf_verifier_state *vstate, 7392 struct bpf_verifier_state *vparent) 7393 { 7394 struct bpf_reg_state *state_reg, *parent_reg; 7395 struct bpf_func_state *state, *parent; 7396 int i, frame, err = 0; 7397 7398 if (vparent->curframe != vstate->curframe) { 7399 WARN(1, "propagate_live: parent frame %d current frame %d\n", 7400 vparent->curframe, vstate->curframe); 7401 return -EFAULT; 7402 } 7403 /* Propagate read liveness of registers... */ 7404 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 7405 for (frame = 0; frame <= vstate->curframe; frame++) { 7406 parent = vparent->frame[frame]; 7407 state = vstate->frame[frame]; 7408 parent_reg = parent->regs; 7409 state_reg = state->regs; 7410 /* We don't need to worry about FP liveness, it's read-only */ 7411 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 7412 err = propagate_liveness_reg(env, &state_reg[i], 7413 &parent_reg[i]); 7414 if (err < 0) 7415 return err; 7416 if (err == REG_LIVE_READ64) 7417 mark_insn_zext(env, &parent_reg[i]); 7418 } 7419 7420 /* Propagate stack slots. */ 7421 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 7422 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 7423 parent_reg = &parent->stack[i].spilled_ptr; 7424 state_reg = &state->stack[i].spilled_ptr; 7425 err = propagate_liveness_reg(env, state_reg, 7426 parent_reg); 7427 if (err < 0) 7428 return err; 7429 } 7430 } 7431 return 0; 7432 } 7433 7434 /* find precise scalars in the previous equivalent state and 7435 * propagate them into the current state 7436 */ 7437 static int propagate_precision(struct bpf_verifier_env *env, 7438 const struct bpf_verifier_state *old) 7439 { 7440 struct bpf_reg_state *state_reg; 7441 struct bpf_func_state *state; 7442 int i, err = 0; 7443 7444 state = old->frame[old->curframe]; 7445 state_reg = state->regs; 7446 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 7447 if (state_reg->type != SCALAR_VALUE || 7448 !state_reg->precise) 7449 continue; 7450 if (env->log.level & BPF_LOG_LEVEL2) 7451 verbose(env, "propagating r%d\n", i); 7452 err = mark_chain_precision(env, i); 7453 if (err < 0) 7454 return err; 7455 } 7456 7457 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 7458 if (state->stack[i].slot_type[0] != STACK_SPILL) 7459 continue; 7460 state_reg = &state->stack[i].spilled_ptr; 7461 if (state_reg->type != SCALAR_VALUE || 7462 !state_reg->precise) 7463 continue; 7464 if (env->log.level & BPF_LOG_LEVEL2) 7465 verbose(env, "propagating fp%d\n", 7466 (-i - 1) * BPF_REG_SIZE); 7467 err = mark_chain_precision_stack(env, i); 7468 if (err < 0) 7469 return err; 7470 } 7471 return 0; 7472 } 7473 7474 static bool states_maybe_looping(struct bpf_verifier_state *old, 7475 struct bpf_verifier_state *cur) 7476 { 7477 struct bpf_func_state *fold, *fcur; 7478 int i, fr = cur->curframe; 7479 7480 if (old->curframe != fr) 7481 return false; 7482 7483 fold = old->frame[fr]; 7484 fcur = cur->frame[fr]; 7485 for (i = 0; i < MAX_BPF_REG; i++) 7486 if (memcmp(&fold->regs[i], &fcur->regs[i], 7487 offsetof(struct bpf_reg_state, parent))) 7488 return false; 7489 return true; 7490 } 7491 7492 7493 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 7494 { 7495 struct bpf_verifier_state_list *new_sl; 7496 struct bpf_verifier_state_list *sl, **pprev; 7497 struct bpf_verifier_state *cur = env->cur_state, *new; 7498 int i, j, err, states_cnt = 0; 7499 bool add_new_state = env->test_state_freq ? 
true : false;
7500 
7501 cur->last_insn_idx = env->prev_insn_idx;
7502 if (!env->insn_aux_data[insn_idx].prune_point)
7503 /* this 'insn_idx' instruction wasn't marked, so we will not
7504 * be doing state search here
7505 */
7506 return 0;
7507 
7508 /* bpf progs typically have a pruning point every 4 instructions
7509 * http://vger.kernel.org/bpfconf2019.html#session-1
7510 * Do not add new state for future pruning if the verifier hasn't seen
7511 * at least 2 jumps and at least 8 instructions.
7512 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
7513 * In tests that amounts to up to a 50% reduction in total verifier
7514 * memory consumption and a 20% verifier time speedup.
7515 */
7516 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7517 env->insn_processed - env->prev_insn_processed >= 8)
7518 add_new_state = true;
7519 
7520 pprev = explored_state(env, insn_idx);
7521 sl = *pprev;
7522 
7523 clean_live_states(env, insn_idx, cur);
7524 
7525 while (sl) {
7526 states_cnt++;
7527 if (sl->state.insn_idx != insn_idx)
7528 goto next;
7529 if (sl->state.branches) {
7530 if (states_maybe_looping(&sl->state, cur) &&
7531 states_equal(env, &sl->state, cur)) {
7532 verbose_linfo(env, insn_idx, "; ");
7533 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7534 return -EINVAL;
7535 }
7536 /* if the verifier is processing a loop, avoid adding a new state
7537 * too often, since different loop iterations have distinct
7538 * states and may not help future pruning.
7539 * This threshold shouldn't be too low to make sure that
7540 * a loop with a large bound will be rejected quickly.
7541 * The most abusive loop will be:
7542 * r1 += 1
7543 * if r1 < 1000000 goto pc-2
7544 * 1M insn_processed limit / 100 == 10k peak states.
7545 * This threshold shouldn't be too high either, since states
7546 * at the end of the loop are likely to be useful in pruning.
7547 */
7548 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7549 env->insn_processed - env->prev_insn_processed < 100)
7550 add_new_state = false;
7551 goto miss;
7552 }
7553 if (states_equal(env, &sl->state, cur)) {
7554 sl->hit_cnt++;
7555 /* reached equivalent register/stack state,
7556 * prune the search.
7557 * Registers read by the continuation are read by us.
7558 * If we have any write marks in env->cur_state, they
7559 * will prevent corresponding reads in the continuation
7560 * from reaching our parent (an explored_state). Our
7561 * own state will get the read marks recorded, but
7562 * they'll be immediately forgotten as we're pruning
7563 * this state and will pop a new one.
7564 */
7565 err = propagate_liveness(env, &sl->state, cur);
7566 
7567 /* if previous state reached the exit with precision and
7568 * current state is equivalent to it (except precision marks)
7569 * the precision needs to be propagated back in
7570 * the current state.
7571 */
7572 err = err ? : push_jmp_history(env, cur);
7573 err = err ? : propagate_precision(env, &sl->state);
7574 if (err)
7575 return err;
7576 return 1;
7577 }
7578 miss:
7579 /* when a new state is not going to be added do not increase miss count.
7580 * Otherwise several loop iterations will remove the state
7581 * recorded earlier. The goal of these heuristics is to have
7582 * states from some iterations of the loop (some in the beginning
7583 * and some at the end) to help pruning.
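*
* E.g. (an illustrative run): while the loop body is being processed,
* add_new_state stays false, so the recorded states keep their
* miss_cnt and are not evicted; only iterations that do add a new
* state also charge a miss to the states they failed to match, so
* states from the beginning and the end of the loop tend to survive.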
7584 */
7585 if (add_new_state)
7586 sl->miss_cnt++;
7587 /* heuristic to determine whether it is beneficial to keep
7588 * checking this state from the state equivalence point of view.
7589 * Higher numbers increase max_states_per_insn and verification time,
7590 * but do not meaningfully decrease insn_processed.
7591 */
7592 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7593 /* the state is unlikely to be useful. Remove it to
7594 * speed up verification
7595 */
7596 *pprev = sl->next;
7597 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
7598 u32 br = sl->state.branches;
7599 
7600 WARN_ONCE(br,
7601 "BUG live_done but branches_to_explore %d\n",
7602 br);
7603 free_verifier_state(&sl->state, false);
7604 kfree(sl);
7605 env->peak_states--;
7606 } else {
7607 /* cannot free this state, since the parentage chain may
7608 * walk it later. Add it to the free_list instead, to
7609 * be freed at the end of verification
7610 */
7611 sl->next = env->free_list;
7612 env->free_list = sl;
7613 }
7614 sl = *pprev;
7615 continue;
7616 }
7617 next:
7618 pprev = &sl->next;
7619 sl = *pprev;
7620 }
7621 
7622 if (env->max_states_per_insn < states_cnt)
7623 env->max_states_per_insn = states_cnt;
7624 
7625 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
7626 return push_jmp_history(env, cur);
7627 
7628 if (!add_new_state)
7629 return push_jmp_history(env, cur);
7630 
7631 /* There were no equivalent states, remember the current one.
7632 * Technically the current state is not proven to be safe yet,
7633 * but it will either reach the outermost bpf_exit (which means it's safe)
7634 * or it will be rejected. When there are no loops the verifier won't be
7635 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
7636 * again on the way to bpf_exit.
7637 * When looping the sl->state.branches will be > 0 and this state
7638 * will not be considered for equivalence until branches == 0.
7639 */
7640 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
7641 if (!new_sl)
7642 return -ENOMEM;
7643 env->total_states++;
7644 env->peak_states++;
7645 env->prev_jmps_processed = env->jmps_processed;
7646 env->prev_insn_processed = env->insn_processed;
7647 
7648 /* add new state to the head of linked list */
7649 new = &new_sl->state;
7650 err = copy_verifier_state(new, cur);
7651 if (err) {
7652 free_verifier_state(new, false);
7653 kfree(new_sl);
7654 return err;
7655 }
7656 new->insn_idx = insn_idx;
7657 WARN_ONCE(new->branches != 1,
7658 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
7659 
7660 cur->parent = new;
7661 cur->first_insn_idx = insn_idx;
7662 clear_jmp_history(cur);
7663 new_sl->next = *explored_state(env, insn_idx);
7664 *explored_state(env, insn_idx) = new_sl;
7665 /* connect new state to parentage chain. Current frame needs all
7666 * registers connected. Only r6 - r9 of the callers are alive (pushed
7667 * to the stack implicitly by JITs) so in callers' frames connect just
7668 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7669 * the state of the call instruction (with WRITTEN set), and r0 comes
7670 * from the callee with its full parentage chain, anyway.
7671 */
7672 /* clear write marks in current state: the writes we did are not writes
7673 * our child did, so they don't screen off its reads from us.
7674 * (There are no read marks in current state, because reads always mark
7675 * their parent and current state never has children yet. Only
7676 * explored_states can get read marks.)
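*
* Sketch of the resulting links (illustrative): for the current frame f
* and register i, the loop below establishes
*   cur->frame[f]->regs[i].parent == &new->frame[f]->regs[i]
* so when the continuation of cur reads regs[i], mark_reg_read() walks
* this parent pointer and records the read mark on the copy kept in
* the explored state, which is what later pruning decisions consult.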
7677 */
7678 for (j = 0; j <= cur->curframe; j++) {
7679 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7680 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7681 for (i = 0; i < BPF_REG_FP; i++)
7682 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7683 }
7684 
7685 /* all stack frames are accessible from callee, clear them all */
7686 for (j = 0; j <= cur->curframe; j++) {
7687 struct bpf_func_state *frame = cur->frame[j];
7688 struct bpf_func_state *newframe = new->frame[j];
7689 
7690 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
7691 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
7692 frame->stack[i].spilled_ptr.parent =
7693 &newframe->stack[i].spilled_ptr;
7694 }
7695 }
7696 return 0;
7697 }
7698 
7699 /* Return true if it's OK to have the same insn return a different type. */
7700 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7701 {
7702 switch (type) {
7703 case PTR_TO_CTX:
7704 case PTR_TO_SOCKET:
7705 case PTR_TO_SOCKET_OR_NULL:
7706 case PTR_TO_SOCK_COMMON:
7707 case PTR_TO_SOCK_COMMON_OR_NULL:
7708 case PTR_TO_TCP_SOCK:
7709 case PTR_TO_TCP_SOCK_OR_NULL:
7710 case PTR_TO_XDP_SOCK:
7711 case PTR_TO_BTF_ID:
7712 return false;
7713 default:
7714 return true;
7715 }
7716 }
7717 
7718 /* If an instruction was previously used with particular pointer types, then we
7719 * need to be careful to avoid cases such as the below, where it may be ok
7720 * for one branch to access the pointer, but not ok for the other branch:
7721 *
7722 * R1 = sock_ptr
7723 * goto X;
7724 * ...
7725 * R1 = some_other_valid_ptr;
7726 * goto X;
7727 * ...
7728 * R2 = *(u32 *)(R1 + 0);
7729 */
7730 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7731 {
7732 return src != prev && (!reg_type_mismatch_ok(src) ||
7733 !reg_type_mismatch_ok(prev));
7734 }
7735 
7736 static int do_check(struct bpf_verifier_env *env)
7737 {
7738 struct bpf_verifier_state *state;
7739 struct bpf_insn *insns = env->prog->insnsi;
7740 struct bpf_reg_state *regs;
7741 int insn_cnt = env->prog->len;
7742 bool do_print_state = false;
7743 int prev_insn_idx = -1;
7744 
7745 env->prev_linfo = NULL;
7746 
7747 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
7748 if (!state)
7749 return -ENOMEM;
7750 state->curframe = 0;
7751 state->speculative = false;
7752 state->branches = 1;
7753 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
7754 if (!state->frame[0]) {
7755 kfree(state);
7756 return -ENOMEM;
7757 }
7758 env->cur_state = state;
7759 init_func_state(env, state->frame[0],
7760 BPF_MAIN_FUNC /* callsite */,
7761 0 /* frameno */,
7762 0 /* subprogno, zero == main subprog */);
7763 
7764 if (btf_check_func_arg_match(env, 0))
7765 return -EINVAL;
7766 
7767 for (;;) {
7768 struct bpf_insn *insn;
7769 u8 class;
7770 int err;
7771 
7772 env->prev_insn_idx = prev_insn_idx;
7773 if (env->insn_idx >= insn_cnt) {
7774 verbose(env, "invalid insn idx %d insn_cnt %d\n",
7775 env->insn_idx, insn_cnt);
7776 return -EFAULT;
7777 }
7778 
7779 insn = &insns[env->insn_idx];
7780 class = BPF_CLASS(insn->code);
7781 
7782 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
7783 verbose(env,
7784 "BPF program is too large.
Processed %d insn\n",
7785 env->insn_processed);
7786 return -E2BIG;
7787 }
7788 
7789 err = is_state_visited(env, env->insn_idx);
7790 if (err < 0)
7791 return err;
7792 if (err == 1) {
7793 /* found equivalent state, can prune the search */
7794 if (env->log.level & BPF_LOG_LEVEL) {
7795 if (do_print_state)
7796 verbose(env, "\nfrom %d to %d%s: safe\n",
7797 env->prev_insn_idx, env->insn_idx,
7798 env->cur_state->speculative ?
7799 " (speculative execution)" : "");
7800 else
7801 verbose(env, "%d: safe\n", env->insn_idx);
7802 }
7803 goto process_bpf_exit;
7804 }
7805 
7806 if (signal_pending(current))
7807 return -EAGAIN;
7808 
7809 if (need_resched())
7810 cond_resched();
7811 
7812 if (env->log.level & BPF_LOG_LEVEL2 ||
7813 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7814 if (env->log.level & BPF_LOG_LEVEL2)
7815 verbose(env, "%d:", env->insn_idx);
7816 else
7817 verbose(env, "\nfrom %d to %d%s:",
7818 env->prev_insn_idx, env->insn_idx,
7819 env->cur_state->speculative ?
7820 " (speculative execution)" : "");
7821 print_verifier_state(env, state->frame[state->curframe]);
7822 do_print_state = false;
7823 }
7824 
7825 if (env->log.level & BPF_LOG_LEVEL) {
7826 const struct bpf_insn_cbs cbs = {
7827 .cb_print = verbose,
7828 .private_data = env,
7829 };
7830 
7831 verbose_linfo(env, env->insn_idx, "; ");
7832 verbose(env, "%d: ", env->insn_idx);
7833 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
7834 }
7835 
7836 if (bpf_prog_is_dev_bound(env->prog->aux)) {
7837 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7838 env->prev_insn_idx);
7839 if (err)
7840 return err;
7841 }
7842 
7843 regs = cur_regs(env);
7844 env->insn_aux_data[env->insn_idx].seen = true;
7845 prev_insn_idx = env->insn_idx;
7846 
7847 if (class == BPF_ALU || class == BPF_ALU64) {
7848 err = check_alu_op(env, insn);
7849 if (err)
7850 return err;
7851 
7852 } else if (class == BPF_LDX) {
7853 enum bpf_reg_type *prev_src_type, src_reg_type;
7854 
7855 /* the check for reserved fields was already done */
7856 
7857 /* check src operand */
7858 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7859 if (err)
7860 return err;
7861 
7862 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
7863 if (err)
7864 return err;
7865 
7866 src_reg_type = regs[insn->src_reg].type;
7867 
7868 /* check that memory (src_reg + off) is readable,
7869 * the state of dst_reg will be updated by this func
7870 */
7871 err = check_mem_access(env, env->insn_idx, insn->src_reg,
7872 insn->off, BPF_SIZE(insn->code),
7873 BPF_READ, insn->dst_reg, false);
7874 if (err)
7875 return err;
7876 
7877 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
7878 
7879 if (*prev_src_type == NOT_INIT) {
7880 /* saw a valid insn
7881 * dst_reg = *(u32 *)(src_reg + off)
7882 * save type to validate intersecting paths
7883 */
7884 *prev_src_type = src_reg_type;
7885 
7886 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
7887 /* An abuser program is trying to use the same insn
7888 * dst_reg = *(u32*) (src_reg + off)
7889 * with different pointer types:
7890 * src_reg == ctx in one branch and
7891 * src_reg == stack|map in some other branch.
7892 * Reject it.
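*
* E.g. (illustrative) the first path to reach this insn recorded
* *prev_src_type == PTR_TO_CTX in insn_aux_data; a second path now
* arrives with src_reg_type == PTR_TO_STACK. Neither type is
* mismatch-ok, so reg_type_mismatch() fires and the program is
* rejected rather than letting a ctx-specific rewrite of this
* load be reused for a stack pointer.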
7893 */ 7894 verbose(env, "same insn cannot be used with different pointers\n"); 7895 return -EINVAL; 7896 } 7897 7898 } else if (class == BPF_STX) { 7899 enum bpf_reg_type *prev_dst_type, dst_reg_type; 7900 7901 if (BPF_MODE(insn->code) == BPF_XADD) { 7902 err = check_xadd(env, env->insn_idx, insn); 7903 if (err) 7904 return err; 7905 env->insn_idx++; 7906 continue; 7907 } 7908 7909 /* check src1 operand */ 7910 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7911 if (err) 7912 return err; 7913 /* check src2 operand */ 7914 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7915 if (err) 7916 return err; 7917 7918 dst_reg_type = regs[insn->dst_reg].type; 7919 7920 /* check that memory (dst_reg + off) is writeable */ 7921 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 7922 insn->off, BPF_SIZE(insn->code), 7923 BPF_WRITE, insn->src_reg, false); 7924 if (err) 7925 return err; 7926 7927 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 7928 7929 if (*prev_dst_type == NOT_INIT) { 7930 *prev_dst_type = dst_reg_type; 7931 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 7932 verbose(env, "same insn cannot be used with different pointers\n"); 7933 return -EINVAL; 7934 } 7935 7936 } else if (class == BPF_ST) { 7937 if (BPF_MODE(insn->code) != BPF_MEM || 7938 insn->src_reg != BPF_REG_0) { 7939 verbose(env, "BPF_ST uses reserved fields\n"); 7940 return -EINVAL; 7941 } 7942 /* check src operand */ 7943 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7944 if (err) 7945 return err; 7946 7947 if (is_ctx_reg(env, insn->dst_reg)) { 7948 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 7949 insn->dst_reg, 7950 reg_type_str[reg_state(env, insn->dst_reg)->type]); 7951 return -EACCES; 7952 } 7953 7954 /* check that memory (dst_reg + off) is writeable */ 7955 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 7956 insn->off, BPF_SIZE(insn->code), 7957 BPF_WRITE, -1, false); 7958 if (err) 7959 return err; 7960 7961 } else if (class == BPF_JMP || class == BPF_JMP32) { 7962 u8 opcode = BPF_OP(insn->code); 7963 7964 env->jmps_processed++; 7965 if (opcode == BPF_CALL) { 7966 if (BPF_SRC(insn->code) != BPF_K || 7967 insn->off != 0 || 7968 (insn->src_reg != BPF_REG_0 && 7969 insn->src_reg != BPF_PSEUDO_CALL) || 7970 insn->dst_reg != BPF_REG_0 || 7971 class == BPF_JMP32) { 7972 verbose(env, "BPF_CALL uses reserved fields\n"); 7973 return -EINVAL; 7974 } 7975 7976 if (env->cur_state->active_spin_lock && 7977 (insn->src_reg == BPF_PSEUDO_CALL || 7978 insn->imm != BPF_FUNC_spin_unlock)) { 7979 verbose(env, "function calls are not allowed while holding a lock\n"); 7980 return -EINVAL; 7981 } 7982 if (insn->src_reg == BPF_PSEUDO_CALL) 7983 err = check_func_call(env, insn, &env->insn_idx); 7984 else 7985 err = check_helper_call(env, insn->imm, env->insn_idx); 7986 if (err) 7987 return err; 7988 7989 } else if (opcode == BPF_JA) { 7990 if (BPF_SRC(insn->code) != BPF_K || 7991 insn->imm != 0 || 7992 insn->src_reg != BPF_REG_0 || 7993 insn->dst_reg != BPF_REG_0 || 7994 class == BPF_JMP32) { 7995 verbose(env, "BPF_JA uses reserved fields\n"); 7996 return -EINVAL; 7997 } 7998 7999 env->insn_idx += insn->off + 1; 8000 continue; 8001 8002 } else if (opcode == BPF_EXIT) { 8003 if (BPF_SRC(insn->code) != BPF_K || 8004 insn->imm != 0 || 8005 insn->src_reg != BPF_REG_0 || 8006 insn->dst_reg != BPF_REG_0 || 8007 class == BPF_JMP32) { 8008 verbose(env, "BPF_EXIT uses reserved fields\n"); 8009 return -EINVAL; 8010 } 8011 8012 if (env->cur_state->active_spin_lock) { 8013 verbose(env, 
"bpf_spin_unlock is missing\n"); 8014 return -EINVAL; 8015 } 8016 8017 if (state->curframe) { 8018 /* exit from nested function */ 8019 err = prepare_func_exit(env, &env->insn_idx); 8020 if (err) 8021 return err; 8022 do_print_state = true; 8023 continue; 8024 } 8025 8026 err = check_reference_leak(env); 8027 if (err) 8028 return err; 8029 8030 /* eBPF calling convetion is such that R0 is used 8031 * to return the value from eBPF program. 8032 * Make sure that it's readable at this time 8033 * of bpf_exit, which means that program wrote 8034 * something into it earlier 8035 */ 8036 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 8037 if (err) 8038 return err; 8039 8040 if (is_pointer_value(env, BPF_REG_0)) { 8041 verbose(env, "R0 leaks addr as return value\n"); 8042 return -EACCES; 8043 } 8044 8045 err = check_return_code(env); 8046 if (err) 8047 return err; 8048 process_bpf_exit: 8049 update_branch_counts(env, env->cur_state); 8050 err = pop_stack(env, &prev_insn_idx, 8051 &env->insn_idx); 8052 if (err < 0) { 8053 if (err != -ENOENT) 8054 return err; 8055 break; 8056 } else { 8057 do_print_state = true; 8058 continue; 8059 } 8060 } else { 8061 err = check_cond_jmp_op(env, insn, &env->insn_idx); 8062 if (err) 8063 return err; 8064 } 8065 } else if (class == BPF_LD) { 8066 u8 mode = BPF_MODE(insn->code); 8067 8068 if (mode == BPF_ABS || mode == BPF_IND) { 8069 err = check_ld_abs(env, insn); 8070 if (err) 8071 return err; 8072 8073 } else if (mode == BPF_IMM) { 8074 err = check_ld_imm(env, insn); 8075 if (err) 8076 return err; 8077 8078 env->insn_idx++; 8079 env->insn_aux_data[env->insn_idx].seen = true; 8080 } else { 8081 verbose(env, "invalid BPF_LD mode\n"); 8082 return -EINVAL; 8083 } 8084 } else { 8085 verbose(env, "unknown insn class %d\n", class); 8086 return -EINVAL; 8087 } 8088 8089 env->insn_idx++; 8090 } 8091 8092 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 8093 return 0; 8094 } 8095 8096 static int check_map_prealloc(struct bpf_map *map) 8097 { 8098 return (map->map_type != BPF_MAP_TYPE_HASH && 8099 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 8100 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 8101 !(map->map_flags & BPF_F_NO_PREALLOC); 8102 } 8103 8104 static bool is_tracing_prog_type(enum bpf_prog_type type) 8105 { 8106 switch (type) { 8107 case BPF_PROG_TYPE_KPROBE: 8108 case BPF_PROG_TYPE_TRACEPOINT: 8109 case BPF_PROG_TYPE_PERF_EVENT: 8110 case BPF_PROG_TYPE_RAW_TRACEPOINT: 8111 return true; 8112 default: 8113 return false; 8114 } 8115 } 8116 8117 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 8118 struct bpf_map *map, 8119 struct bpf_prog *prog) 8120 8121 { 8122 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 8123 * preallocated hash maps, since doing memory allocation 8124 * in overflow_handler can crash depending on where nmi got 8125 * triggered. 
8126 */ 8127 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 8128 if (!check_map_prealloc(map)) { 8129 verbose(env, "perf_event programs can only use preallocated hash map\n"); 8130 return -EINVAL; 8131 } 8132 if (map->inner_map_meta && 8133 !check_map_prealloc(map->inner_map_meta)) { 8134 verbose(env, "perf_event programs can only use preallocated inner hash map\n"); 8135 return -EINVAL; 8136 } 8137 } 8138 8139 if ((is_tracing_prog_type(prog->type) || 8140 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && 8141 map_value_has_spin_lock(map)) { 8142 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 8143 return -EINVAL; 8144 } 8145 8146 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 8147 !bpf_offload_prog_map_match(prog, map)) { 8148 verbose(env, "offload device mismatch between prog and map\n"); 8149 return -EINVAL; 8150 } 8151 8152 return 0; 8153 } 8154 8155 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 8156 { 8157 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 8158 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 8159 } 8160 8161 /* look for pseudo eBPF instructions that access map FDs and 8162 * replace them with actual map pointers 8163 */ 8164 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 8165 { 8166 struct bpf_insn *insn = env->prog->insnsi; 8167 int insn_cnt = env->prog->len; 8168 int i, j, err; 8169 8170 err = bpf_prog_calc_tag(env->prog); 8171 if (err) 8172 return err; 8173 8174 for (i = 0; i < insn_cnt; i++, insn++) { 8175 if (BPF_CLASS(insn->code) == BPF_LDX && 8176 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 8177 verbose(env, "BPF_LDX uses reserved fields\n"); 8178 return -EINVAL; 8179 } 8180 8181 if (BPF_CLASS(insn->code) == BPF_STX && 8182 ((BPF_MODE(insn->code) != BPF_MEM && 8183 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 8184 verbose(env, "BPF_STX uses reserved fields\n"); 8185 return -EINVAL; 8186 } 8187 8188 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 8189 struct bpf_insn_aux_data *aux; 8190 struct bpf_map *map; 8191 struct fd f; 8192 u64 addr; 8193 8194 if (i == insn_cnt - 1 || insn[1].code != 0 || 8195 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 8196 insn[1].off != 0) { 8197 verbose(env, "invalid bpf_ld_imm64 insn\n"); 8198 return -EINVAL; 8199 } 8200 8201 if (insn[0].src_reg == 0) 8202 /* valid generic load 64-bit imm */ 8203 goto next_insn; 8204 8205 /* In final convert_pseudo_ld_imm64() step, this is 8206 * converted into regular 64-bit imm load insn. 
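*
* E.g. (illustrative) BPF_LD_MAP_FD(BPF_REG_1, map_fd) expands to an
* insn pair: insn[0] with src_reg == BPF_PSEUDO_MAP_FD and imm ==
* map_fd, and insn[1] with imm == 0. Below, the imm fields of the
* pair are rewritten to the lower/upper 32 bits of the map address.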
8207 */
8208 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
8209 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
8210 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
8211 insn[1].imm != 0)) {
8212 verbose(env,
8213 "unrecognized bpf_ld_imm64 insn\n");
8214 return -EINVAL;
8215 }
8216 
8217 f = fdget(insn[0].imm);
8218 map = __bpf_map_get(f);
8219 if (IS_ERR(map)) {
8220 verbose(env, "fd %d is not pointing to valid bpf_map\n",
8221 insn[0].imm);
8222 return PTR_ERR(map);
8223 }
8224 
8225 err = check_map_prog_compatibility(env, map, env->prog);
8226 if (err) {
8227 fdput(f);
8228 return err;
8229 }
8230 
8231 aux = &env->insn_aux_data[i];
8232 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
8233 addr = (unsigned long)map;
8234 } else {
8235 u32 off = insn[1].imm;
8236 
8237 if (off >= BPF_MAX_VAR_OFF) {
8238 verbose(env, "direct value offset of %u is not allowed\n", off);
8239 fdput(f);
8240 return -EINVAL;
8241 }
8242 
8243 if (!map->ops->map_direct_value_addr) {
8244 verbose(env, "no direct value access support for this map type\n");
8245 fdput(f);
8246 return -EINVAL;
8247 }
8248 
8249 err = map->ops->map_direct_value_addr(map, &addr, off);
8250 if (err) {
8251 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
8252 map->value_size, off);
8253 fdput(f);
8254 return err;
8255 }
8256 
8257 aux->map_off = off;
8258 addr += off;
8259 }
8260 
8261 insn[0].imm = (u32)addr;
8262 insn[1].imm = addr >> 32;
8263 
8264 /* check whether we recorded this map already */
8265 for (j = 0; j < env->used_map_cnt; j++) {
8266 if (env->used_maps[j] == map) {
8267 aux->map_index = j;
8268 fdput(f);
8269 goto next_insn;
8270 }
8271 }
8272 
8273 if (env->used_map_cnt >= MAX_USED_MAPS) {
8274 fdput(f);
8275 return -E2BIG;
8276 }
8277 
8278 /* hold the map. If the program is rejected by the verifier,
8279 * the map will be released by release_maps() or it
8280 * will be used by the valid program until it's unloaded
8281 * and all maps are released in free_used_maps()
8282 */
8283 bpf_map_inc(map);
8284 
8285 aux->map_index = env->used_map_cnt;
8286 env->used_maps[env->used_map_cnt++] = map;
8287 
8288 if (bpf_map_is_cgroup_storage(map) &&
8289 bpf_cgroup_storage_assign(env->prog->aux, map)) {
8290 verbose(env, "only one cgroup storage of each type is allowed\n");
8291 fdput(f);
8292 return -EBUSY;
8293 }
8294 
8295 fdput(f);
8296 next_insn:
8297 insn++;
8298 i++;
8299 continue;
8300 }
8301 
8302 /* Basic sanity check before we invest more work here. */
8303 if (!bpf_opcode_in_insntable(insn->code)) {
8304 verbose(env, "unknown opcode %02x\n", insn->code);
8305 return -EINVAL;
8306 }
8307 }
8308 
8309 /* now all pseudo BPF_LD_IMM64 instructions load valid
8310 * 'struct bpf_map *' into a register instead of user map_fd.
8311 * These pointers will be used later by the verifier to validate map access.
8312 */
8313 return 0;
8314 }
8315 
8316 /* drop refcnt of maps used by the rejected program */
8317 static void release_maps(struct bpf_verifier_env *env)
8318 {
8319 __bpf_free_used_maps(env->prog->aux, env->used_maps,
8320 env->used_map_cnt);
8321 }
8322 
8323 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
8324 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
8325 {
8326 struct bpf_insn *insn = env->prog->insnsi;
8327 int insn_cnt = env->prog->len;
8328 int i;
8329 
8330 for (i = 0; i < insn_cnt; i++, insn++)
8331 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8332 insn->src_reg = 0;
8333 }
8334 
8335 /* single env->prog->insnsi[off] instruction was replaced with the range
8336 * insnsi[off, off + cnt).
Adjust corresponding insn_aux_data by copying
8337 * [0, off) and [off, end) to new locations, so the patched range stays zero
8338 */
8339 static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8340 struct bpf_prog *new_prog, u32 off, u32 cnt)
8341 {
8342 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
8343 struct bpf_insn *insn = new_prog->insnsi;
8344 u32 prog_len;
8345 int i;
8346 
8347 /* aux info at OFF always needs adjustment, no matter whether the fast
8348 * path (cnt == 1) is taken or not. There is no guarantee the INSN at OFF
8349 * is the original insn in the old prog.
8350 */
8351 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8352 
8353 if (cnt == 1)
8354 return 0;
8355 prog_len = new_prog->len;
8356 new_data = vzalloc(array_size(prog_len,
8357 sizeof(struct bpf_insn_aux_data)));
8358 if (!new_data)
8359 return -ENOMEM;
8360 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8361 memcpy(new_data + off + cnt - 1, old_data + off,
8362 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
8363 for (i = off; i < off + cnt - 1; i++) {
8364 new_data[i].seen = true;
8365 new_data[i].zext_dst = insn_has_def32(env, insn + i);
8366 }
8367 env->insn_aux_data = new_data;
8368 vfree(old_data);
8369 return 0;
8370 }
8371 
8372 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8373 {
8374 int i;
8375 
8376 if (len == 1)
8377 return;
8378 /* NOTE: fake 'exit' subprog should be updated as well. */
8379 for (i = 0; i <= env->subprog_cnt; i++) {
8380 if (env->subprog_info[i].start <= off)
8381 continue;
8382 env->subprog_info[i].start += len - 1;
8383 }
8384 }
8385 
8386 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8387 const struct bpf_insn *patch, u32 len)
8388 {
8389 struct bpf_prog *new_prog;
8390 
8391 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
8392 if (IS_ERR(new_prog)) {
8393 if (PTR_ERR(new_prog) == -ERANGE)
8394 verbose(env,
8395 "insn %d cannot be patched due to 16-bit range\n",
8396 env->insn_aux_data[off].orig_idx);
8397 return NULL;
8398 }
8399 if (adjust_insn_aux_data(env, new_prog, off, len))
8400 return NULL;
8401 adjust_subprog_starts(env, off, len);
8402 return new_prog;
8403 }
8404 
8405 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8406 u32 off, u32 cnt)
8407 {
8408 int i, j;
8409 
8410 /* find first prog starting at or after off (first to remove) */
8411 for (i = 0; i < env->subprog_cnt; i++)
8412 if (env->subprog_info[i].start >= off)
8413 break;
8414 /* find first prog starting at or after off + cnt (first to stay) */
8415 for (j = i; j < env->subprog_cnt; j++)
8416 if (env->subprog_info[j].start >= off + cnt)
8417 break;
8418 /* if j doesn't start exactly at off + cnt, we are just removing
8419 * the front of the previous prog
8420 */
8421 if (env->subprog_info[j].start != off + cnt)
8422 j--;
8423 
8424 if (j > i) {
8425 struct bpf_prog_aux *aux = env->prog->aux;
8426 int move;
8427 
8428 /* move fake 'exit' subprog as well */
8429 move = env->subprog_cnt + 1 - j;
8430 
8431 memmove(env->subprog_info + i,
8432 env->subprog_info + j,
8433 sizeof(*env->subprog_info) * move);
8434 env->subprog_cnt -= j - i;
8435 
8436 /* remove func_info */
8437 if (aux->func_info) {
8438 move = aux->func_info_cnt - j;
8439 
8440 memmove(aux->func_info + i,
8441 aux->func_info + j,
8442 sizeof(*aux->func_info) * move);
8443 aux->func_info_cnt -= j - i;
8444 /* func_info->insn_off is set after all code rewrites,
8445 * in
adjust_btf_func() - no need to adjust
8446 */
8447 }
8448 } else {
8449 /* convert i from "first prog to remove" to "first to adjust" */
8450 if (env->subprog_info[i].start == off)
8451 i++;
8452 }
8453 
8454 /* update fake 'exit' subprog as well */
8455 for (; i <= env->subprog_cnt; i++)
8456 env->subprog_info[i].start -= cnt;
8457 
8458 return 0;
8459 }
8460 
8461 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8462 u32 cnt)
8463 {
8464 struct bpf_prog *prog = env->prog;
8465 u32 i, l_off, l_cnt, nr_linfo;
8466 struct bpf_line_info *linfo;
8467 
8468 nr_linfo = prog->aux->nr_linfo;
8469 if (!nr_linfo)
8470 return 0;
8471 
8472 linfo = prog->aux->linfo;
8473 
8474 /* find first line info to remove, count lines to be removed */
8475 for (i = 0; i < nr_linfo; i++)
8476 if (linfo[i].insn_off >= off)
8477 break;
8478 
8479 l_off = i;
8480 l_cnt = 0;
8481 for (; i < nr_linfo; i++)
8482 if (linfo[i].insn_off < off + cnt)
8483 l_cnt++;
8484 else
8485 break;
8486 
8487 /* If the first live insn doesn't match the first live linfo, it needs to
8488 * "inherit" the last removed linfo. prog is already modified, so prog->len
8489 * == off means no live instructions after (tail of the program was removed).
8490 */
8491 if (prog->len != off && l_cnt &&
8492 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8493 l_cnt--;
8494 linfo[--i].insn_off = off + cnt;
8495 }
8496 
8497 /* remove the line infos which refer to the removed instructions */
8498 if (l_cnt) {
8499 memmove(linfo + l_off, linfo + i,
8500 sizeof(*linfo) * (nr_linfo - i));
8501 
8502 prog->aux->nr_linfo -= l_cnt;
8503 nr_linfo = prog->aux->nr_linfo;
8504 }
8505 
8506 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
8507 for (i = l_off; i < nr_linfo; i++)
8508 linfo[i].insn_off -= cnt;
8509 
8510 /* fix up all subprogs (incl. 'exit') which start >= off */
8511 for (i = 0; i <= env->subprog_cnt; i++)
8512 if (env->subprog_info[i].linfo_idx > l_off) {
8513 /* program may have started in the removed region but
8514 * may not be fully removed
8515 */
8516 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8517 env->subprog_info[i].linfo_idx -= l_cnt;
8518 else
8519 env->subprog_info[i].linfo_idx = l_off;
8520 }
8521 
8522 return 0;
8523 }
8524 
8525 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8526 {
8527 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8528 unsigned int orig_prog_len = env->prog->len;
8529 int err;
8530 
8531 if (bpf_prog_is_dev_bound(env->prog->aux))
8532 bpf_prog_offload_remove_insns(env, off, cnt);
8533 
8534 err = bpf_remove_insns(env->prog, off, cnt);
8535 if (err)
8536 return err;
8537 
8538 err = adjust_subprog_starts_after_remove(env, off, cnt);
8539 if (err)
8540 return err;
8541 
8542 err = bpf_adj_linfo_after_remove(env, off, cnt);
8543 if (err)
8544 return err;
8545 
8546 memmove(aux_data + off, aux_data + off + cnt,
8547 sizeof(*aux_data) * (orig_prog_len - off - cnt));
8548 
8549 return 0;
8550 }
8551 
8552 /* The verifier does more data flow analysis than llvm and will not
8553 * explore branches that are dead at run time. Malicious programs can
8554 * have dead code too. Therefore replace all dead at-run-time code
8555 * with 'ja -1'.
8556 *
8557 * Just nops are not optimal: if they sat at the end of the program and
8558 * through another bug we managed to jump there, we'd execute beyond
8559 * program memory. Returning an exception code also wouldn't work since
8560 * we can have subprogs where the dead
8561 * code could be located.
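*
* Hence each dead insn is overwritten with BPF_JMP_IMM(BPF_JA, 0, 0, -1),
* i.e. 'ja -1': were it ever reached despite the analysis, it would
* just spin on itself instead of falling off the end of the program.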
8562 */ 8563 static void sanitize_dead_code(struct bpf_verifier_env *env) 8564 { 8565 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8566 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 8567 struct bpf_insn *insn = env->prog->insnsi; 8568 const int insn_cnt = env->prog->len; 8569 int i; 8570 8571 for (i = 0; i < insn_cnt; i++) { 8572 if (aux_data[i].seen) 8573 continue; 8574 memcpy(insn + i, &trap, sizeof(trap)); 8575 } 8576 } 8577 8578 static bool insn_is_cond_jump(u8 code) 8579 { 8580 u8 op; 8581 8582 if (BPF_CLASS(code) == BPF_JMP32) 8583 return true; 8584 8585 if (BPF_CLASS(code) != BPF_JMP) 8586 return false; 8587 8588 op = BPF_OP(code); 8589 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 8590 } 8591 8592 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 8593 { 8594 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8595 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 8596 struct bpf_insn *insn = env->prog->insnsi; 8597 const int insn_cnt = env->prog->len; 8598 int i; 8599 8600 for (i = 0; i < insn_cnt; i++, insn++) { 8601 if (!insn_is_cond_jump(insn->code)) 8602 continue; 8603 8604 if (!aux_data[i + 1].seen) 8605 ja.off = insn->off; 8606 else if (!aux_data[i + 1 + insn->off].seen) 8607 ja.off = 0; 8608 else 8609 continue; 8610 8611 if (bpf_prog_is_dev_bound(env->prog->aux)) 8612 bpf_prog_offload_replace_insn(env, i, &ja); 8613 8614 memcpy(insn, &ja, sizeof(ja)); 8615 } 8616 } 8617 8618 static int opt_remove_dead_code(struct bpf_verifier_env *env) 8619 { 8620 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 8621 int insn_cnt = env->prog->len; 8622 int i, err; 8623 8624 for (i = 0; i < insn_cnt; i++) { 8625 int j; 8626 8627 j = 0; 8628 while (i + j < insn_cnt && !aux_data[i + j].seen) 8629 j++; 8630 if (!j) 8631 continue; 8632 8633 err = verifier_remove_insns(env, i, j); 8634 if (err) 8635 return err; 8636 insn_cnt = env->prog->len; 8637 } 8638 8639 return 0; 8640 } 8641 8642 static int opt_remove_nops(struct bpf_verifier_env *env) 8643 { 8644 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 8645 struct bpf_insn *insn = env->prog->insnsi; 8646 int insn_cnt = env->prog->len; 8647 int i, err; 8648 8649 for (i = 0; i < insn_cnt; i++) { 8650 if (memcmp(&insn[i], &ja, sizeof(ja))) 8651 continue; 8652 8653 err = verifier_remove_insns(env, i, 1); 8654 if (err) 8655 return err; 8656 insn_cnt--; 8657 i--; 8658 } 8659 8660 return 0; 8661 } 8662 8663 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 8664 const union bpf_attr *attr) 8665 { 8666 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 8667 struct bpf_insn_aux_data *aux = env->insn_aux_data; 8668 int i, patch_len, delta = 0, len = env->prog->len; 8669 struct bpf_insn *insns = env->prog->insnsi; 8670 struct bpf_prog *new_prog; 8671 bool rnd_hi32; 8672 8673 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 8674 zext_patch[1] = BPF_ZEXT_REG(0); 8675 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 8676 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 8677 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 8678 for (i = 0; i < len; i++) { 8679 int adj_idx = i + delta; 8680 struct bpf_insn insn; 8681 8682 insn = insns[adj_idx]; 8683 if (!aux[adj_idx].zext_dst) { 8684 u8 code, class; 8685 u32 imm_rnd; 8686 8687 if (!rnd_hi32) 8688 continue; 8689 8690 code = insn.code; 8691 class = BPF_CLASS(code); 8692 if (insn_no_def(&insn)) 8693 continue; 8694 8695 /* NOTE: arg "reg" (the fourth one) is only used for 8696 * 
BPF_STX which has been ruled out in above 8697 * check, it is safe to pass NULL here. 8698 */ 8699 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { 8700 if (class == BPF_LD && 8701 BPF_MODE(code) == BPF_IMM) 8702 i++; 8703 continue; 8704 } 8705 8706 /* ctx load could be transformed into wider load. */ 8707 if (class == BPF_LDX && 8708 aux[adj_idx].ptr_type == PTR_TO_CTX) 8709 continue; 8710 8711 imm_rnd = get_random_int(); 8712 rnd_hi32_patch[0] = insn; 8713 rnd_hi32_patch[1].imm = imm_rnd; 8714 rnd_hi32_patch[3].dst_reg = insn.dst_reg; 8715 patch = rnd_hi32_patch; 8716 patch_len = 4; 8717 goto apply_patch_buffer; 8718 } 8719 8720 if (!bpf_jit_needs_zext()) 8721 continue; 8722 8723 zext_patch[0] = insn; 8724 zext_patch[1].dst_reg = insn.dst_reg; 8725 zext_patch[1].src_reg = insn.dst_reg; 8726 patch = zext_patch; 8727 patch_len = 2; 8728 apply_patch_buffer: 8729 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 8730 if (!new_prog) 8731 return -ENOMEM; 8732 env->prog = new_prog; 8733 insns = new_prog->insnsi; 8734 aux = env->insn_aux_data; 8735 delta += patch_len - 1; 8736 } 8737 8738 return 0; 8739 } 8740 8741 /* convert load instructions that access fields of a context type into a 8742 * sequence of instructions that access fields of the underlying structure: 8743 * struct __sk_buff -> struct sk_buff 8744 * struct bpf_sock_ops -> struct sock 8745 */ 8746 static int convert_ctx_accesses(struct bpf_verifier_env *env) 8747 { 8748 const struct bpf_verifier_ops *ops = env->ops; 8749 int i, cnt, size, ctx_field_size, delta = 0; 8750 const int insn_cnt = env->prog->len; 8751 struct bpf_insn insn_buf[16], *insn; 8752 u32 target_size, size_default, off; 8753 struct bpf_prog *new_prog; 8754 enum bpf_access_type type; 8755 bool is_narrower_load; 8756 8757 if (ops->gen_prologue || env->seen_direct_write) { 8758 if (!ops->gen_prologue) { 8759 verbose(env, "bpf verifier is misconfigured\n"); 8760 return -EINVAL; 8761 } 8762 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 8763 env->prog); 8764 if (cnt >= ARRAY_SIZE(insn_buf)) { 8765 verbose(env, "bpf verifier is misconfigured\n"); 8766 return -EINVAL; 8767 } else if (cnt) { 8768 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 8769 if (!new_prog) 8770 return -ENOMEM; 8771 8772 env->prog = new_prog; 8773 delta += cnt - 1; 8774 } 8775 } 8776 8777 if (bpf_prog_is_dev_bound(env->prog->aux)) 8778 return 0; 8779 8780 insn = env->prog->insnsi + delta; 8781 8782 for (i = 0; i < insn_cnt; i++, insn++) { 8783 bpf_convert_ctx_access_t convert_ctx_access; 8784 8785 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 8786 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 8787 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 8788 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 8789 type = BPF_READ; 8790 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 8791 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 8792 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 8793 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 8794 type = BPF_WRITE; 8795 else 8796 continue; 8797 8798 if (type == BPF_WRITE && 8799 env->insn_aux_data[i + delta].sanitize_stack_off) { 8800 struct bpf_insn patch[] = { 8801 /* Sanitize suspicious stack slot with zero. 
8802 * There are no memory dependencies for this store,
8803 * since it's only using the frame pointer and an
8804 * immediate constant of zero
8805 */
8806 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8807 env->insn_aux_data[i + delta].sanitize_stack_off,
8808 0),
8809 /* the original STX instruction will immediately
8810 * overwrite the same stack slot with appropriate value
8811 */
8812 *insn,
8813 };
8814 
8815 cnt = ARRAY_SIZE(patch);
8816 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8817 if (!new_prog)
8818 return -ENOMEM;
8819 
8820 delta += cnt - 1;
8821 env->prog = new_prog;
8822 insn = new_prog->insnsi + i + delta;
8823 continue;
8824 }
8825 
8826 switch (env->insn_aux_data[i + delta].ptr_type) {
8827 case PTR_TO_CTX:
8828 if (!ops->convert_ctx_access)
8829 continue;
8830 convert_ctx_access = ops->convert_ctx_access;
8831 break;
8832 case PTR_TO_SOCKET:
8833 case PTR_TO_SOCK_COMMON:
8834 convert_ctx_access = bpf_sock_convert_ctx_access;
8835 break;
8836 case PTR_TO_TCP_SOCK:
8837 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8838 break;
8839 case PTR_TO_XDP_SOCK:
8840 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8841 break;
8842 case PTR_TO_BTF_ID:
8843 if (type == BPF_WRITE) {
8844 verbose(env, "Writes through BTF pointers are not allowed\n");
8845 return -EINVAL;
8846 }
8847 insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
8848 env->prog->aux->num_exentries++;
8849 continue;
8850 default:
8851 continue;
8852 }
8853 
8854 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
8855 size = BPF_LDST_BYTES(insn);
8856 
8857 /* If the read access is a narrower load of the field,
8858 * convert to a 4/8-byte load, to minimize program type specific
8859 * convert_ctx_access changes. If conversion is successful,
8860 * we will apply the proper mask to the result.
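*
* E.g. (illustrative, little-endian): a 1-byte load at offset 1 of a
* 4-byte ctx field is widened to a 4-byte load at offset 0; the code
* below then appends
*   BPF_ALU32_IMM(BPF_RSH, dst_reg, 8)
*   BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff)
* to extract the originally requested byte.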
8861 */ 8862 is_narrower_load = size < ctx_field_size; 8863 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 8864 off = insn->off; 8865 if (is_narrower_load) { 8866 u8 size_code; 8867 8868 if (type == BPF_WRITE) { 8869 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 8870 return -EINVAL; 8871 } 8872 8873 size_code = BPF_H; 8874 if (ctx_field_size == 4) 8875 size_code = BPF_W; 8876 else if (ctx_field_size == 8) 8877 size_code = BPF_DW; 8878 8879 insn->off = off & ~(size_default - 1); 8880 insn->code = BPF_LDX | BPF_MEM | size_code; 8881 } 8882 8883 target_size = 0; 8884 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 8885 &target_size); 8886 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 8887 (ctx_field_size && !target_size)) { 8888 verbose(env, "bpf verifier is misconfigured\n"); 8889 return -EINVAL; 8890 } 8891 8892 if (is_narrower_load && size < target_size) { 8893 u8 shift = bpf_ctx_narrow_access_offset( 8894 off, size, size_default) * 8; 8895 if (ctx_field_size <= 4) { 8896 if (shift) 8897 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 8898 insn->dst_reg, 8899 shift); 8900 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 8901 (1 << size * 8) - 1); 8902 } else { 8903 if (shift) 8904 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 8905 insn->dst_reg, 8906 shift); 8907 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 8908 (1ULL << size * 8) - 1); 8909 } 8910 } 8911 8912 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 8913 if (!new_prog) 8914 return -ENOMEM; 8915 8916 delta += cnt - 1; 8917 8918 /* keep walking new program and skip insns we just inserted */ 8919 env->prog = new_prog; 8920 insn = new_prog->insnsi + i + delta; 8921 } 8922 8923 return 0; 8924 } 8925 8926 static int jit_subprogs(struct bpf_verifier_env *env) 8927 { 8928 struct bpf_prog *prog = env->prog, **func, *tmp; 8929 int i, j, subprog_start, subprog_end = 0, len, subprog; 8930 struct bpf_insn *insn; 8931 void *old_bpf_func; 8932 int err; 8933 8934 if (env->subprog_cnt <= 1) 8935 return 0; 8936 8937 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 8938 if (insn->code != (BPF_JMP | BPF_CALL) || 8939 insn->src_reg != BPF_PSEUDO_CALL) 8940 continue; 8941 /* Upon error here we cannot fall back to interpreter but 8942 * need a hard reject of the program. Thus -EFAULT is 8943 * propagated in any case. 8944 */ 8945 subprog = find_subprog(env, i + insn->imm + 1); 8946 if (subprog < 0) { 8947 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 8948 i + insn->imm + 1); 8949 return -EFAULT; 8950 } 8951 /* temporarily remember subprog id inside insn instead of 8952 * aux_data, since next loop will split up all insns into funcs 8953 */ 8954 insn->off = subprog; 8955 /* remember original imm in case JIT fails and fallback 8956 * to interpreter will be needed 8957 */ 8958 env->insn_aux_data[i].call_imm = insn->imm; 8959 /* point imm to __bpf_call_base+1 from JITs point of view */ 8960 insn->imm = 1; 8961 } 8962 8963 err = bpf_prog_alloc_jited_linfo(prog); 8964 if (err) 8965 goto out_undo_insn; 8966 8967 err = -ENOMEM; 8968 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 8969 if (!func) 8970 goto out_undo_insn; 8971 8972 for (i = 0; i < env->subprog_cnt; i++) { 8973 subprog_start = subprog_end; 8974 subprog_end = env->subprog_info[i + 1].start; 8975 8976 len = subprog_end - subprog_start; 8977 /* BPF_PROG_RUN doesn't call subprogs directly, 8978 * hence main prog stats include the runtime of subprogs. 
8979 * subprogs don't have IDs and are not reachable via prog_get_next_id
8980 * func[i]->aux->stats will never be accessed and stays NULL
8981 */
8982 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
8983 if (!func[i])
8984 goto out_free;
8985 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
8986 len * sizeof(struct bpf_insn));
8987 func[i]->type = prog->type;
8988 func[i]->len = len;
8989 if (bpf_prog_calc_tag(func[i]))
8990 goto out_free;
8991 func[i]->is_func = 1;
8992 func[i]->aux->func_idx = i;
8993 /* the btf and func_info will be freed only at prog->aux */
8994 func[i]->aux->btf = prog->aux->btf;
8995 func[i]->aux->func_info = prog->aux->func_info;
8996 
8997 /* Use bpf_prog_F_tag to indicate functions in stack traces.
8998 * Long term we would need debug info to populate names
8999 */
9000 func[i]->aux->name[0] = 'F';
9001 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
9002 func[i]->jit_requested = 1;
9003 func[i]->aux->linfo = prog->aux->linfo;
9004 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
9005 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
9006 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
9007 func[i] = bpf_int_jit_compile(func[i]);
9008 if (!func[i]->jited) {
9009 err = -ENOTSUPP;
9010 goto out_free;
9011 }
9012 cond_resched();
9013 }
9014 /* at this point all bpf functions were successfully JITed
9015 * now populate all bpf_calls with correct addresses and
9016 * run the last pass of the JIT
9017 */
9018 for (i = 0; i < env->subprog_cnt; i++) {
9019 insn = func[i]->insnsi;
9020 for (j = 0; j < func[i]->len; j++, insn++) {
9021 if (insn->code != (BPF_JMP | BPF_CALL) ||
9022 insn->src_reg != BPF_PSEUDO_CALL)
9023 continue;
9024 subprog = insn->off;
9025 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9026 __bpf_call_base;
9027 }
9028 
9029 /* we use the aux data to keep a list of the start addresses
9030 * of the JITed images for each function in the program
9031 *
9032 * for some architectures, such as powerpc64, the imm field
9033 * might not be large enough to hold the offset of the start
9034 * address of the callee's JITed image from __bpf_call_base
9035 *
9036 * in such cases, we can look up the start address of a callee
9037 * by using its subprog id, available from the off field of
9038 * the call instruction, as an index for this list
9039 */
9040 func[i]->aux->func = func;
9041 func[i]->aux->func_cnt = env->subprog_cnt;
9042 }
9043 for (i = 0; i < env->subprog_cnt; i++) {
9044 old_bpf_func = func[i]->bpf_func;
9045 tmp = bpf_int_jit_compile(func[i]);
9046 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9047 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
9048 err = -ENOTSUPP;
9049 goto out_free;
9050 }
9051 cond_resched();
9052 }
9053 
9054 /* finally lock prog and jit images for all functions and
9055 * populate kallsyms
9056 */
9057 for (i = 0; i < env->subprog_cnt; i++) {
9058 bpf_prog_lock_ro(func[i]);
9059 bpf_prog_kallsyms_add(func[i]);
9060 }
9061 
9062 /* Last step: make now unused interpreter insns from main
9063 * prog consistent for later dump requests, so they can
9064 * later look the same as if they were interpreted only.
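*
* I.e. for each bpf-to-bpf call insn below, off is restored to the
* original relative target kept in call_imm and imm is set to the
* subprog number, so a later dump of the main prog no longer exposes
* JIT-specific address offsets.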
9065 */ 9066 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 9067 if (insn->code != (BPF_JMP | BPF_CALL) || 9068 insn->src_reg != BPF_PSEUDO_CALL) 9069 continue; 9070 insn->off = env->insn_aux_data[i].call_imm; 9071 subprog = find_subprog(env, i + insn->off + 1); 9072 insn->imm = subprog; 9073 } 9074 9075 prog->jited = 1; 9076 prog->bpf_func = func[0]->bpf_func; 9077 prog->aux->func = func; 9078 prog->aux->func_cnt = env->subprog_cnt; 9079 bpf_prog_free_unused_jited_linfo(prog); 9080 return 0; 9081 out_free: 9082 for (i = 0; i < env->subprog_cnt; i++) 9083 if (func[i]) 9084 bpf_jit_free(func[i]); 9085 kfree(func); 9086 out_undo_insn: 9087 /* cleanup main prog to be interpreted */ 9088 prog->jit_requested = 0; 9089 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 9090 if (insn->code != (BPF_JMP | BPF_CALL) || 9091 insn->src_reg != BPF_PSEUDO_CALL) 9092 continue; 9093 insn->off = 0; 9094 insn->imm = env->insn_aux_data[i].call_imm; 9095 } 9096 bpf_prog_free_jited_linfo(prog); 9097 return err; 9098 } 9099 9100 static int fixup_call_args(struct bpf_verifier_env *env) 9101 { 9102 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 9103 struct bpf_prog *prog = env->prog; 9104 struct bpf_insn *insn = prog->insnsi; 9105 int i, depth; 9106 #endif 9107 int err = 0; 9108 9109 if (env->prog->jit_requested && 9110 !bpf_prog_is_dev_bound(env->prog->aux)) { 9111 err = jit_subprogs(env); 9112 if (err == 0) 9113 return 0; 9114 if (err == -EFAULT) 9115 return err; 9116 } 9117 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 9118 for (i = 0; i < prog->len; i++, insn++) { 9119 if (insn->code != (BPF_JMP | BPF_CALL) || 9120 insn->src_reg != BPF_PSEUDO_CALL) 9121 continue; 9122 depth = get_callee_stack_depth(env, insn, i); 9123 if (depth < 0) 9124 return depth; 9125 bpf_patch_call_args(insn, depth); 9126 } 9127 err = 0; 9128 #endif 9129 return err; 9130 } 9131 9132 /* fixup insn->imm field of bpf_call instructions 9133 * and inline eligible helpers as explicit sequence of BPF instructions 9134 * 9135 * this function is called after eBPF program passed verification 9136 */ 9137 static int fixup_bpf_calls(struct bpf_verifier_env *env) 9138 { 9139 struct bpf_prog *prog = env->prog; 9140 bool expect_blinding = bpf_jit_blinding_enabled(prog); 9141 struct bpf_insn *insn = prog->insnsi; 9142 const struct bpf_func_proto *fn; 9143 const int insn_cnt = prog->len; 9144 const struct bpf_map_ops *ops; 9145 struct bpf_insn_aux_data *aux; 9146 struct bpf_insn insn_buf[16]; 9147 struct bpf_prog *new_prog; 9148 struct bpf_map *map_ptr; 9149 int i, ret, cnt, delta = 0; 9150 9151 for (i = 0; i < insn_cnt; i++, insn++) { 9152 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 9153 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 9154 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 9155 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 9156 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 9157 struct bpf_insn mask_and_div[] = { 9158 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 9159 /* Rx div 0 -> 0 */ 9160 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), 9161 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 9162 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 9163 *insn, 9164 }; 9165 struct bpf_insn mask_and_mod[] = { 9166 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 9167 /* Rx mod 0 -> Rx */ 9168 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), 9169 *insn, 9170 }; 9171 struct bpf_insn *patchlet; 9172 9173 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 9174 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 9175 patchlet = mask_and_div + (is64 ? 

			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
				patchlet = mask_and_div + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
			} else {
				patchlet = mask_and_mod + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
			struct bpf_insn insn_buf[16];
			struct bpf_insn *patch = &insn_buf[0];
			bool issrc, isneg;
			u32 off_reg;

			aux = &env->insn_aux_data[i + delta];
			if (!aux->alu_state ||
			    aux->alu_state == BPF_ALU_NON_POINTER)
				continue;

			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
				BPF_ALU_SANITIZE_SRC;

			off_reg = issrc ? insn->src_reg : insn->dst_reg;
			if (isneg)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
			if (issrc) {
				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
							 off_reg);
				insn->src_reg = BPF_REG_AX;
			} else {
				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
							 BPF_REG_AX);
			}
			if (isneg)
				insn->code = insn->code == code_add ?
					     code_sub : code_add;
			*patch++ = *insn;
			if (issrc && isneg)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			cnt = patch - insn_buf;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}
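
		/* Example (conceptual): the sanitation patch above clamps a
		 * pointer offset 'off_reg' against aux->alu_limit to defuse
		 * speculative out-of-bounds arithmetic. For the dst-reg case
		 * it acts roughly like
		 *
		 *   ax = alu_limit - 1;
		 *   ax -= off_reg;
		 *   ax |= off_reg;
		 *   ax = -ax;
		 *   ax s>>= 63;     // all-ones if off_reg is within
		 *                   // the limit, otherwise zero
		 *   off_reg &= ax;  // out-of-range offset becomes 0
		 *
		 * before the original ADD/SUB executes; the src-reg case
		 * masks into BPF_REG_AX instead to keep off_reg intact.
		 */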

		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;
			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;

			/* mark bpf_tail_call as a different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by a JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (env->allow_ptr_leaks && !expect_blinding &&
			    prog->jit_requested &&
			    !bpf_map_key_poisoned(aux) &&
			    !bpf_map_ptr_poisoned(aux) &&
			    !bpf_map_ptr_unpriv(aux)) {
				struct bpf_jit_poke_descriptor desc = {
					.reason = BPF_POKE_REASON_TAIL_CALL,
					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
					.tail_call.key = bpf_map_key_immediate(aux),
				};

				ret = bpf_jit_add_poke_descriptor(prog, &desc);
				if (ret < 0) {
					verbose(env, "adding tail call poke descriptor failed\n");
					return ret;
				}

				insn->imm = ret + 1;
				continue;
			}

			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}
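
		/* Example (conceptual): with the two extra insns above, an
		 * unprivileged
		 *
		 *   bpf_tail_call(ctx, &jmp_table, index);  // index in R3
		 *
		 * executes as
		 *
		 *   if (index >= jmp_table.max_entries)
		 *           goto out;            // fall past the tail call
		 *   index &= array->index_mask;  // power-of-two mask
		 *   ... original tail call ...
		 *
		 * so even a mispredicted bounds check cannot speculatively
		 * index past the array. The poke descriptor branch further
		 * above instead records a constant (map, key) pair so the
		 * JIT can later patch in a direct jump.
		 */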

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
		     insn->imm == BPF_FUNC_map_delete_elem ||
		     insn->imm == BPF_FUNC_map_push_elem ||
		     insn->imm == BPF_FUNC_map_pop_elem ||
		     insn->imm == BPF_FUNC_map_peek_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			ops = map_ptr->ops;
			if (insn->imm == BPF_FUNC_map_lookup_elem &&
			    ops->map_gen_lookup) {
				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
					verbose(env, "bpf verifier is misconfigured\n");
					return -EINVAL;
				}

				new_prog = bpf_patch_insn_data(env, i + delta,
							       insn_buf, cnt);
				if (!new_prog)
					return -ENOMEM;

				delta += cnt - 1;
				env->prog = prog = new_prog;
				insn = new_prog->insnsi + i + delta;
				continue;
			}
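
			/* Example (conceptual): for an array map,
			 * ops->map_gen_lookup typically expands the helper
			 * call into BPF insns equivalent to
			 *
			 *   index = *(u32 *)key;
			 *   if (index >= array->map.max_entries)
			 *           return NULL;
			 *   return array->value +
			 *          index * round_up(map->value_size, 8);
			 *
			 * replacing the indirect helper call with a few
			 * inline instructions.
			 */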

			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
				     (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
				     (int (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (int (*)(struct bpf_map *map, void *key, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
				     (int (*)(struct bpf_map *map, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));

			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
					    __bpf_call_base;
				continue;
			}

			goto patch_call_imm;
		}

patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have a prototype and that the verifier
		 * allowed programs to call must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	/* Since the poke tab is now finalized, publish aux to the tracker. */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		if (!map_ptr->ops->map_poke_track ||
		    !map_ptr->ops->map_poke_untrack ||
		    !map_ptr->ops->map_poke_run) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
		if (ret < 0) {
			verbose(env, "tracking tail call prog failed\n");
			return ret;
		}
	}

	return 0;
}

static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	sl = env->free_list;
	while (sl) {
		sln = sl->next;
		free_verifier_state(&sl->state, false);
		kfree(sl);
		sl = sln;
	}

	if (!env->explored_states)
		return;

	for (i = 0; i < state_htab_size(env); i++) {
		sl = env->explored_states[i];

		while (sl) {
			sln = sl->next;
			free_verifier_state(&sl->state, false);
			kfree(sl);
			sl = sln;
		}
	}

	kvfree(env->explored_states);
}

static void print_verification_stats(struct bpf_verifier_env *env)
{
	int i;

	if (env->log.level & BPF_LOG_STATS) {
		verbose(env, "verification time %lld usec\n",
			div_u64(env->verification_time, 1000));
		verbose(env, "stack depth ");
		for (i = 0; i < env->subprog_cnt; i++) {
			u32 depth = env->subprog_info[i].stack_depth;

			verbose(env, "%d", depth);
			if (i + 1 < env->subprog_cnt)
				verbose(env, "+");
		}
		verbose(env, "\n");
	}
	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
		"total_states %d peak_states %d mark_read %d\n",
		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
		env->max_states_per_insn, env->total_states,
		env->peak_states, env->longest_mark_read_walk);
}

static int check_attach_btf_id(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
	u32 btf_id = prog->aux->attach_btf_id;
	const char prefix[] = "btf_trace_";
	int ret = 0, subprog = -1, i;
	struct bpf_trampoline *tr;
	const struct btf_type *t;
	bool conservative = true;
	const char *tname;
	struct btf *btf;
	long addr;
	u64 key;

	if (prog->type != BPF_PROG_TYPE_TRACING)
		return 0;

	if (!btf_id) {
		verbose(env, "Tracing programs must provide btf_id\n");
		return -EINVAL;
	}
	btf = bpf_prog_get_target_btf(prog);
	if (!btf) {
		verbose(env,
			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
		return -EINVAL;
	}
	t = btf_type_by_id(btf, btf_id);
	if (!t) {
		verbose(env, "attach_btf_id %u is invalid\n", btf_id);
		return -EINVAL;
	}
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
		return -EINVAL;
	}
	if (tgt_prog) {
		struct bpf_prog_aux *aux = tgt_prog->aux;

		for (i = 0; i < aux->func_info_cnt; i++)
			if (aux->func_info[i].type_id == btf_id) {
				subprog = i;
				break;
			}
		if (subprog == -1) {
			verbose(env, "Subprog %s doesn't exist\n", tname);
			return -EINVAL;
		}
		conservative = aux->func_info_aux[subprog].unreliable;
		key = ((u64)aux->id) << 32 | btf_id;
	} else {
		key = btf_id;
	}
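
	/* Example (conceptual): the trampoline key packs the target prog id
	 * (absent for vmlinux targets) into the upper 32 bits and the BTF
	 * type id into the lower 32 bits:
	 *
	 *   key = ((u64)tgt_prog->aux->id) << 32 | btf_id;  // prog target
	 *   key = btf_id;                                   // vmlinux target
	 *
	 * so at most one trampoline exists per attach point.
	 */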

	switch (prog->expected_attach_type) {
	case BPF_TRACE_RAW_TP:
		if (tgt_prog) {
			verbose(env,
				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
			return -EINVAL;
		}
		if (!btf_type_is_typedef(t)) {
			verbose(env, "attach_btf_id %u is not a typedef\n",
				btf_id);
			return -EINVAL;
		}
		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
			verbose(env, "attach_btf_id %u points to wrong type name %s\n",
				btf_id, tname);
			return -EINVAL;
		}
		tname += sizeof(prefix) - 1;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_ptr(t))
			/* should never happen in a valid vmlinux build */
			return -EINVAL;
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			/* should never happen in a valid vmlinux build */
			return -EINVAL;

		/* remember two read-only pointers that are valid for
		 * the lifetime of the kernel
		 */
		prog->aux->attach_func_name = tname;
		prog->aux->attach_func_proto = t;
		prog->aux->attach_btf_trace = true;
		return 0;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (!btf_type_is_func(t)) {
			verbose(env, "attach_btf_id %u is not a function\n",
				btf_id);
			return -EINVAL;
		}
		t = btf_type_by_id(btf, t->type);
		if (!btf_type_is_func_proto(t))
			return -EINVAL;
		tr = bpf_trampoline_lookup(key);
		if (!tr)
			return -ENOMEM;
		prog->aux->attach_func_name = tname;
		/* t is either a vmlinux type or another program's type */
		prog->aux->attach_func_proto = t;
		mutex_lock(&tr->mutex);
		if (tr->func.addr) {
			prog->aux->trampoline = tr;
			goto out;
		}
		if (tgt_prog && conservative) {
			prog->aux->attach_func_proto = NULL;
			t = NULL;
		}
		ret = btf_distill_func_proto(&env->log, btf, t,
					     tname, &tr->func.model);
		if (ret < 0)
			goto out;
		if (tgt_prog) {
			if (!tgt_prog->jited) {
				/* for now */
				verbose(env, "Can trace only JITed BPF progs\n");
				ret = -EINVAL;
				goto out;
			}
			if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
				/* prevent cycles */
				verbose(env, "Cannot recursively attach\n");
				ret = -EINVAL;
				goto out;
			}
			if (subprog == 0)
				addr = (long) tgt_prog->bpf_func;
			else
				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
		} else {
			addr = kallsyms_lookup_name(tname);
			if (!addr) {
				verbose(env,
					"The address of function %s cannot be found\n",
					tname);
				ret = -ENOENT;
				goto out;
			}
		}
		tr->func.addr = (void *)addr;
		prog->aux->trampoline = tr;
out:
		mutex_unlock(&tr->mutex);
		if (ret)
			bpf_trampoline_put(tr);
		return ret;
	default:
		return -EINVAL;
	}
}
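
/* Example (conceptual): for BPF_TRACE_RAW_TP the attach_btf_id is expected
 * to name one of the "btf_trace_" typedefs emitted for raw tracepoints, e.g.
 *
 *   typedef void (*btf_trace_sched_switch)(void *ctx, bool preempt,
 *                                          struct task_struct *prev,
 *                                          struct task_struct *next);
 *
 * The typedef resolves to a pointer and then to the func_proto describing
 * the tracepoint arguments, which is what the two btf_type_by_id() steps
 * above walk through.
 */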

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
	      union bpf_attr __user *uattr)
{
	u64 start_time = ktime_get_ns();
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int i, len, ret = -EINVAL;
	bool is_priv;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' could be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	len = (*prog)->len;
	env->insn_aux_data =
		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	for (i = 0; i < len; i++)
		env->insn_aux_data[i].orig_idx = i;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];
	is_priv = capable(CAP_SYS_ADMIN);

	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
		mutex_lock(&bpf_verifier_lock);
		if (!btf_vmlinux)
			btf_vmlinux = btf_parse_vmlinux();
		mutex_unlock(&bpf_verifier_lock);
	}

	/* grab the mutex to protect a few globals used by the verifier */
	if (!is_priv)
		mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied a buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
		    !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
			goto err_unlock;
	}

	if (IS_ERR(btf_vmlinux)) {
		/* Either gcc or pahole or the kernel is broken. */
		verbose(env, "in-kernel BTF is malformed\n");
		ret = PTR_ERR(btf_vmlinux);
		goto skip_full_check;
	}

	ret = check_attach_btf_id(env);
	if (ret)
		goto skip_full_check;

	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;
	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
		env->strict_alignment = false;

	env->allow_ptr_leaks = is_priv;

	if (is_priv)
		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env->prog);
		if (ret)
			goto skip_full_check;
	}

	env->explored_states = kvcalloc(state_htab_size(env),
					sizeof(struct bpf_verifier_state_list *),
					GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_subprogs(env);
	if (ret < 0)
		goto skip_full_check;

	ret = check_btf_info(env, attr, uattr);
	if (ret < 0)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	/* instruction rewrites happen after this point */
	if (is_priv) {
		if (ret == 0)
			opt_hard_wire_dead_code_branches(env);
		if (ret == 0)
			ret = opt_remove_dead_code(env);
		if (ret == 0)
			ret = opt_remove_nops(env);
	} else {
		if (ret == 0)
			sanitize_dead_code(env);
	}

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);
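
	/* Example (conceptual): 32-bit BPF ALU ops such as
	 *
	 *   BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1)
	 *
	 * are defined to zero the upper 32 bits of the destination. The pass
	 * below inserts explicit zero-extensions only where the verifier saw
	 * the upper half actually being relied upon, so JITs on architectures
	 * whose native 32-bit ops don't implicitly zero-extend (signalled by
	 * bpf_jit_needs_zext()) can skip doing it on every insn.
	 */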

	/* do the 32-bit optimization after insn patching has finished, so
	 * those patched insns can be handled correctly
	 */
	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
								     : false;
	}

	if (ret == 0)
		ret = fixup_call_args(env);

	env->verification_time = ktime_get_ns() - start_time;
	print_verification_stats(env);

	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

	if (ret == 0)
		adjust_btf_func(env);

err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
err_unlock:
	if (!is_priv)
		mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);
err_free_env:
	kfree(env);
	return ret;
}
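
/* Example (conceptual): the log attribute sanity checks in bpf_check() imply
 * a userspace loader sets up BPF_PROG_LOAD roughly as
 *
 *   union bpf_attr attr = {};
 *   static char log_buf[65536];
 *
 *   attr.log_level = 1;                           // non-zero, within BPF_LOG_MASK
 *   attr.log_buf   = (__u64)(unsigned long)log_buf;
 *   attr.log_size  = sizeof(log_buf);             // >= 128, <= UINT_MAX >> 2
 *
 * If the log fills up, bpf_check() returns -ENOSPC; if copying the log to
 * the user buffer faulted (ubuf was cleared), it returns -EFAULT.
 */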